mirror of https://github.com/qemu/qemu.git

commit 4aa3f4dd5b
Clang insists that "cmp" is ambiguous with a memory destination, requiring
an explicit size suffix. There was a true error in the use of
.cfi_def_cfa_offset in the epilogue, but changing to use the proper
.cfi_adjust_cfa_offset runs afoul of a clang bug wrt .cfi_restore_state.
Better to fold the two epilogues so that we don't trigger the bug.

Signed-off-by: Richard Henderson <rth@twiddle.net>
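For illustration, the ambiguity in question looks roughly like this (a
sketch of the issue, not an excerpt from the diff): with an immediate and a
memory operand there is no register to imply an operand width, so clang
rejects the unsuffixed form, while the explicit 32-bit "cmpl" used in the
file assembles cleanly.

	cmp	$0, (%eax)	/* rejected by clang: operand size is ambiguous */
	cmpl	$0, (%eax)	/* explicit 32-bit compare, as used in this file */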
		
			
				
	
	
		
/*
 * safe-syscall.inc.S : host-specific assembly fragment
 * to handle signals occurring at the same time as system calls.
 * This is intended to be included by linux-user/safe-syscall.S
 *
 * Written by Richard Henderson <rth@twiddle.net>
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

	.global safe_syscall_base
	.global safe_syscall_start
	.global safe_syscall_end
	.type	safe_syscall_base, @function

	/* This is the entry point for making a system call. The calling
	 * convention here is that of a C varargs function with the
	 * first argument an 'int *' to the signal_pending flag, the
	 * second one the system call number (as a 'long'), and all further
	 * arguments being syscall arguments (also 'long').
	 * We return a long which is the syscall's return value, which
	 * may be negative-errno on failure. Conversion to the
	 * -1-and-errno-set convention is done by the calling wrapper.
	 */
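	/* Illustrative sketch only: the actual C declaration and wrapper live
	 * elsewhere in linux-user, not in this file.  The convention above
	 * corresponds roughly to
	 *
	 *   long safe_syscall_base(int *signal_pending, long number, ...);
	 *
	 * with the calling wrapper turning a negative-errno return into the
	 * usual -1-and-errno form, along the lines of
	 *
	 *   if (ret < 0) { errno = -ret; ret = -1; }
	 */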
safe_syscall_base:
	.cfi_startproc
	push	%ebp
	.cfi_adjust_cfa_offset 4
	.cfi_rel_offset ebp, 0
	push	%esi
	.cfi_adjust_cfa_offset 4
	.cfi_rel_offset esi, 0
	push	%edi
	.cfi_adjust_cfa_offset 4
	.cfi_rel_offset edi, 0
	push	%ebx
	.cfi_adjust_cfa_offset 4
	.cfi_rel_offset ebx, 0

	/* The syscall calling convention isn't the same as the C one:
	 * we enter with 0(%esp) == return address
	 *               4(%esp) == *signal_pending
	 *               8(%esp) == syscall number
	 *               12(%esp) ... 32(%esp) == syscall arguments
	 *               and return the result in eax
	 * and the syscall instruction needs
	 *               eax == syscall number
	 *               ebx, ecx, edx, esi, edi, ebp == syscall arguments
	 *               and returns the result in eax
	 * Shuffle everything around appropriately.
	 * Note the 16 bytes that we pushed to save registers.
	 */
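	/* Worked example of the offsets used below, derived from the layout
	 * above: at entry the first syscall argument sits at 12(%esp); the
	 * four register pushes above moved %esp down by 16 bytes, so the same
	 * argument is now at 12+16(%esp) = 28(%esp), and the remaining
	 * arguments follow at 4-byte intervals up to 32+16(%esp).
	 */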
	mov	12+16(%esp), %ebx	/* the syscall arguments */
	mov	16+16(%esp), %ecx
	mov	20+16(%esp), %edx
	mov	24+16(%esp), %esi
	mov	28+16(%esp), %edi
	mov	32+16(%esp), %ebp

	/* This next sequence of code works in conjunction with the
	 * rewind_if_safe_syscall_function(). If a signal is taken
	 * and the interrupted PC is anywhere between 'safe_syscall_start'
	 * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
	 * The code sequence must therefore be able to cope with this, and
	 * the syscall instruction must be the final one in the sequence.
	 */
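	/* As a sketch of the other side of this contract (the rewind lives in
	 * the signal-handling code, not in this file; the shape below is
	 * illustrative, not quoted from it), the host signal handler
	 * effectively does
	 *
	 *   if (pc > (uintptr_t)safe_syscall_start &&
	 *       pc < (uintptr_t)safe_syscall_end) {
	 *       pc = (uintptr_t)safe_syscall_start;
	 *   }
	 *
	 * so an interrupted PC anywhere in the region restarts at the
	 * signal_pending check below.
	 */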
safe_syscall_start:
	/* if signal_pending is non-zero, don't do the call */
	mov	4+16(%esp), %eax	/* signal_pending */
	cmpl	$0, (%eax)
	jnz	1f
	mov	8+16(%esp), %eax	/* syscall number */
	int	$0x80
safe_syscall_end:
	/* code path for having successfully executed the syscall */
	pop	%ebx
	.cfi_remember_state
	.cfi_adjust_cfa_offset -4
	.cfi_restore ebx
	pop	%edi
	.cfi_adjust_cfa_offset -4
	.cfi_restore edi
	pop	%esi
	.cfi_adjust_cfa_offset -4
	.cfi_restore esi
	pop	%ebp
	.cfi_adjust_cfa_offset -4
	.cfi_restore ebp
	ret

1:
	/* code path when we didn't execute the syscall */
	.cfi_restore_state
	mov	$-TARGET_ERESTARTSYS, %eax
	jmp	safe_syscall_end
	.cfi_endproc

	.size	safe_syscall_base, .-safe_syscall_base