x86/entry/32: Remove unused resume_userspace label
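
The diff below is part of the conversion of the 32-bit entry code from the
old ENTRY/END/ENDPROC/GLOBAL annotations to the SYM_* macros from
<linux/linkage.h>; along the way it drops the resume_userspace label, which
no longer has any users. Roughly, the mapping applied in this file is the
following (the exact macro depends on whether the symbol is a C-callable
function or special entry code, and on whether it is global or file-local):

    ENTRY / ENDPROC           ->  SYM_FUNC_START / SYM_FUNC_END    (C-callable functions)
    ENTRY / END               ->  SYM_CODE_START / SYM_CODE_END    (non-standard-ABI entry code)
    bare "label:"             ->  SYM_CODE_START_LOCAL[_NOALIGN] ... SYM_CODE_END
    GLOBAL(sym)               ->  SYM_ENTRY(sym, SYM_L_GLOBAL, SYM_A_NONE)
    label inside a function   ->  SYM_INNER_LABEL_ALIGN(sym, SYM_L_GLOBAL)
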
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f83ca5aa8b7794102a9bf23e0eb0123a5d162e82..4bbcc5e64969da609ea8d0de2b2e3a3cd43a1efa 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
  * %eax: prev task
  * %edx: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
        /*
         * Save callee-saved registers
         * This must match the order in struct inactive_task_frame
@@ -748,7 +748,7 @@ ENTRY(__switch_to_asm)
        popl    %ebp
 
        jmp     __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * The unwinder expects the last frame on the stack to always be at the same
@@ -757,7 +757,7 @@ END(__switch_to_asm)
  * asmlinkage function so its argument has to be pushed on the stack.  This
  * wrapper creates a proper "end of stack" frame header before the call.
  */
-ENTRY(schedule_tail_wrapper)
+SYM_FUNC_START(schedule_tail_wrapper)
        FRAME_BEGIN
 
        pushl   %eax
@@ -766,7 +766,7 @@ ENTRY(schedule_tail_wrapper)
 
        FRAME_END
        ret
-ENDPROC(schedule_tail_wrapper)
+SYM_FUNC_END(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -774,7 +774,7 @@ ENDPROC(schedule_tail_wrapper)
  * ebx: kernel thread func (NULL for user thread)
  * edi: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
        call    schedule_tail_wrapper
 
        testl   %ebx, %ebx
@@ -797,7 +797,7 @@ ENTRY(ret_from_fork)
         */
        movl    $0, PT_EAX(%esp)
        jmp     2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Return to user mode is not as complex as all this looks,
@@ -807,8 +807,7 @@ END(ret_from_fork)
  */
 
        # userspace resumption stub bypassing syscall exit tracing
-       ALIGN
-ret_from_exception:
+SYM_CODE_START_LOCAL(ret_from_exception)
        preempt_stop(CLBR_ANY)
 ret_from_intr:
 #ifdef CONFIG_VM86
@@ -825,15 +824,14 @@ ret_from_intr:
        cmpl    $USER_RPL, %eax
        jb      restore_all_kernel              # not returning to v8086 or userspace
 
-ENTRY(resume_userspace)
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl    %esp, %eax
        call    prepare_exit_to_usermode
        jmp     restore_all
-END(ret_from_exception)
+SYM_CODE_END(ret_from_exception)
 
-GLOBAL(__begin_SYSENTER_singlestep_region)
+SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 /*
  * All code from here through __end_SYSENTER_singlestep_region is subject
  * to being single-stepped if a user program sets TF and executes SYSENTER.
@@ -848,9 +846,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
        addl    $5*4, %esp                      /* remove xen-provided frame */
        jmp     .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
@@ -885,7 +884,7 @@ ENTRY(xen_sysenter_target)
  * ebp  user stack
  * 0(%ebp) arg6
  */
-ENTRY(entry_SYSENTER_32)
+SYM_FUNC_START(entry_SYSENTER_32)
        /*
         * On entry-stack with all userspace-regs live - save and
         * restore eflags and %eax to use it as scratch-reg for the cr3
@@ -1012,8 +1011,8 @@ ENTRY(entry_SYSENTER_32)
        pushl   $X86_EFLAGS_FIXED
        popfl
        jmp     .Lsysenter_flags_fixed
-GLOBAL(__end_SYSENTER_singlestep_region)
-ENDPROC(entry_SYSENTER_32)
+SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_FUNC_END(entry_SYSENTER_32)
 
 /*
  * 32-bit legacy system call entry.
@@ -1043,7 +1042,7 @@ ENDPROC(entry_SYSENTER_32)
  * edi  arg5
  * ebp  arg6
  */
-ENTRY(entry_INT80_32)
+SYM_FUNC_START(entry_INT80_32)
        ASM_CLAC
        pushl   %eax                    /* pt_regs->orig_ax */
 
@@ -1100,7 +1099,7 @@ restore_all_kernel:
        jmp     .Lirq_return
 
 .section .fixup, "ax"
-ENTRY(iret_exc )
+SYM_CODE_START(iret_exc)
        pushl   $0                              # no error code
        pushl   $do_iret_error
 
@@ -1117,9 +1116,10 @@ ENTRY(iret_exc   )
 #endif
 
        jmp     common_exception
+SYM_CODE_END(iret_exc)
 .previous
        _ASM_EXTABLE(.Lirq_return, iret_exc)
-ENDPROC(entry_INT80_32)
+SYM_FUNC_END(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -1160,7 +1160,7 @@ ENDPROC(entry_INT80_32)
  * We pack 1 stub into every 8-byte block.
  */
        .align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        pushl   $(~vector+0x80)                 /* Note: always in signed byte range */
@@ -1168,11 +1168,11 @@ ENTRY(irq_entries_start)
        jmp     common_interrupt
        .align  8
     .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 #ifdef CONFIG_X86_LOCAL_APIC
        .align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
     vector=FIRST_SYSTEM_VECTOR
     .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
        pushl   $(~vector+0x80)                 /* Note: always in signed byte range */
@@ -1180,9 +1180,9 @@ ENTRY(spurious_entries_start)
        jmp     common_spurious
        .align  8
     .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
        ASM_CLAC
        addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
        SAVE_ALL switch_stacks=1
@@ -1191,7 +1191,7 @@ common_spurious:
        movl    %esp, %eax
        call    smp_spurious_interrupt
        jmp     ret_from_intr
-ENDPROC(common_spurious)
+SYM_CODE_END(common_spurious)
 #endif
 
 /*
@@ -1199,7 +1199,7 @@ ENDPROC(common_spurious)
  * so IRQ-flags tracing has to follow that:
  */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
        ASM_CLAC
        addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
 
@@ -1209,10 +1209,10 @@ common_interrupt:
        movl    %esp, %eax
        call    do_IRQ
        jmp     ret_from_intr
-ENDPROC(common_interrupt)
+SYM_CODE_END(common_interrupt)
 
 #define BUILD_INTERRUPT3(name, nr, fn)                 \
-ENTRY(name)                                            \
+SYM_FUNC_START(name)                                   \
        ASM_CLAC;                                       \
        pushl   $~(nr);                                 \
        SAVE_ALL switch_stacks=1;                       \
@@ -1221,7 +1221,7 @@ ENTRY(name)                                               \
        movl    %esp, %eax;                             \
        call    fn;                                     \
        jmp     ret_from_intr;                          \
-ENDPROC(name)
+SYM_FUNC_END(name)
 
 #define BUILD_INTERRUPT(name, nr)              \
        BUILD_INTERRUPT3(name, nr, smp_##name); \
@@ -1229,14 +1229,14 @@ ENDPROC(name)
 /* The include is where all of the SMP etc. interrupts come from */
 #include <asm/entry_arch.h>
 
-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
        ASM_CLAC
        pushl   $0
        pushl   $do_coprocessor_error
        jmp     common_exception
-END(coprocessor_error)
+SYM_CODE_END(coprocessor_error)
 
-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
        ASM_CLAC
        pushl   $0
 #ifdef CONFIG_X86_INVD_BUG
@@ -1248,99 +1248,99 @@ ENTRY(simd_coprocessor_error)
        pushl   $do_simd_coprocessor_error
 #endif
        jmp     common_exception
-END(simd_coprocessor_error)
+SYM_CODE_END(simd_coprocessor_error)
 
-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
        ASM_CLAC
        pushl   $-1                             # mark this as an int
        pushl   $do_device_not_available
        jmp     common_exception
-END(device_not_available)
+SYM_CODE_END(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+SYM_CODE_START(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+SYM_CODE_END(native_iret)
 #endif
 
-ENTRY(overflow)
+SYM_CODE_START(overflow)
        ASM_CLAC
        pushl   $0
        pushl   $do_overflow
        jmp     common_exception
-END(overflow)
+SYM_CODE_END(overflow)
 
-ENTRY(bounds)
+SYM_CODE_START(bounds)
        ASM_CLAC
        pushl   $0
        pushl   $do_bounds
        jmp     common_exception
-END(bounds)
+SYM_CODE_END(bounds)
 
-ENTRY(invalid_op)
+SYM_CODE_START(invalid_op)
        ASM_CLAC
        pushl   $0
        pushl   $do_invalid_op
        jmp     common_exception
-END(invalid_op)
+SYM_CODE_END(invalid_op)
 
-ENTRY(coprocessor_segment_overrun)
+SYM_CODE_START(coprocessor_segment_overrun)
        ASM_CLAC
        pushl   $0
        pushl   $do_coprocessor_segment_overrun
        jmp     common_exception
-END(coprocessor_segment_overrun)
+SYM_CODE_END(coprocessor_segment_overrun)
 
-ENTRY(invalid_TSS)
+SYM_CODE_START(invalid_TSS)
        ASM_CLAC
        pushl   $do_invalid_TSS
        jmp     common_exception
-END(invalid_TSS)
+SYM_CODE_END(invalid_TSS)
 
-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
        ASM_CLAC
        pushl   $do_segment_not_present
        jmp     common_exception
-END(segment_not_present)
+SYM_CODE_END(segment_not_present)
 
-ENTRY(stack_segment)
+SYM_CODE_START(stack_segment)
        ASM_CLAC
        pushl   $do_stack_segment
        jmp     common_exception
-END(stack_segment)
+SYM_CODE_END(stack_segment)
 
-ENTRY(alignment_check)
+SYM_CODE_START(alignment_check)
        ASM_CLAC
        pushl   $do_alignment_check
        jmp     common_exception
-END(alignment_check)
+SYM_CODE_END(alignment_check)
 
-ENTRY(divide_error)
+SYM_CODE_START(divide_error)
        ASM_CLAC
        pushl   $0                              # no error code
        pushl   $do_divide_error
        jmp     common_exception
-END(divide_error)
+SYM_CODE_END(divide_error)
 
 #ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
+SYM_CODE_START(machine_check)
        ASM_CLAC
        pushl   $0
        pushl   machine_check_vector
        jmp     common_exception
-END(machine_check)
+SYM_CODE_END(machine_check)
 #endif
 
-ENTRY(spurious_interrupt_bug)
+SYM_CODE_START(spurious_interrupt_bug)
        ASM_CLAC
        pushl   $0
        pushl   $do_spurious_interrupt_bug
        jmp     common_exception
-END(spurious_interrupt_bug)
+SYM_CODE_END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN_PV
-ENTRY(xen_hypervisor_callback)
+SYM_FUNC_START(xen_hypervisor_callback)
        pushl   $-1                             /* orig_ax = -1 => not a system call */
        SAVE_ALL
        ENCODE_FRAME_POINTER
@@ -1361,14 +1361,14 @@ ENTRY(xen_hypervisor_callback)
 
        jmp     xen_iret_crit_fixup
 
-ENTRY(xen_do_upcall)
+SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL)
 1:     mov     %esp, %eax
        call    xen_evtchn_do_upcall
 #ifndef CONFIG_PREEMPTION
        call    xen_maybe_preempt_hcall
 #endif
        jmp     ret_from_intr
-ENDPROC(xen_hypervisor_callback)
+SYM_FUNC_END(xen_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1382,7 +1382,7 @@ ENDPROC(xen_hypervisor_callback)
  * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
  * We distinguish between categories by maintaining a status value in EAX.
  */
-ENTRY(xen_failsafe_callback)
+SYM_FUNC_START(xen_failsafe_callback)
        pushl   %eax
        movl    $1, %eax
 1:     mov     4(%esp), %ds
@@ -1419,7 +1419,7 @@ ENTRY(xen_failsafe_callback)
        _ASM_EXTABLE(2b, 7b)
        _ASM_EXTABLE(3b, 8b)
        _ASM_EXTABLE(4b, 9b)
-ENDPROC(xen_failsafe_callback)
+SYM_FUNC_END(xen_failsafe_callback)
 #endif /* CONFIG_XEN_PV */
 
 #ifdef CONFIG_XEN_PVHVM
@@ -1441,13 +1441,13 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-ENTRY(page_fault)
+SYM_CODE_START(page_fault)
        ASM_CLAC
        pushl   $do_page_fault
        jmp     common_exception_read_cr2
-END(page_fault)
+SYM_CODE_END(page_fault)
 
-common_exception_read_cr2:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
        /* the function address is in %gs's slot on the stack */
        SAVE_ALL switch_stacks=1 skip_gs=1
 
@@ -1470,9 +1470,9 @@ common_exception_read_cr2:
        movl    %esp, %eax                      # pt_regs pointer
        CALL_NOSPEC %edi
        jmp     ret_from_exception
-END(common_exception_read_cr2)
+SYM_CODE_END(common_exception_read_cr2)
 
-common_exception:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception)
        /* the function address is in %gs's slot on the stack */
        SAVE_ALL switch_stacks=1 skip_gs=1
        ENCODE_FRAME_POINTER
@@ -1492,9 +1492,9 @@ common_exception:
        movl    %esp, %eax                      # pt_regs pointer
        CALL_NOSPEC %edi
        jmp     ret_from_exception
-END(common_exception)
+SYM_CODE_END(common_exception)
 
-ENTRY(debug)
+SYM_CODE_START(debug)
        /*
         * Entry from sysenter is now handled in common_exception
         */
@@ -1502,7 +1502,7 @@ ENTRY(debug)
        pushl   $-1                             # mark this as an int
        pushl   $do_debug
        jmp     common_exception
-END(debug)
+SYM_CODE_END(debug)
 
 /*
  * NMI is doubly nasty.  It can happen on the first instruction of
@@ -1511,7 +1511,7 @@ END(debug)
  * switched stacks.  We handle both conditions by simply checking whether we
  * interrupted kernel code running on the SYSENTER stack.
  */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
        ASM_CLAC
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1576,9 +1576,9 @@ ENTRY(nmi)
        lss     12+4(%esp), %esp                # back to espfix stack
        jmp     .Lirq_return
 #endif
-END(nmi)
+SYM_CODE_END(nmi)
 
-ENTRY(int3)
+SYM_CODE_START(int3)
        ASM_CLAC
        pushl   $-1                             # mark this as an int
 
@@ -1589,22 +1589,22 @@ ENTRY(int3)
        movl    %esp, %eax                      # pt_regs pointer
        call    do_int3
        jmp     ret_from_exception
-END(int3)
+SYM_CODE_END(int3)
 
-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
        pushl   $do_general_protection
        jmp     common_exception
-END(general_protection)
+SYM_CODE_END(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
-ENTRY(async_page_fault)
+SYM_CODE_START(async_page_fault)
        ASM_CLAC
        pushl   $do_async_page_fault
        jmp     common_exception_read_cr2
-END(async_page_fault)
+SYM_CODE_END(async_page_fault)
 #endif
 
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl    %ebp, %ebp
 
@@ -1613,4 +1613,4 @@ ENTRY(rewind_stack_do_exit)
 
        call    do_exit
 1:     jmp 1b
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
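
For reference, a sketch of what a BUILD_INTERRUPT3 user looks like after this
conversion. The instantiation below is purely illustrative (my_callback,
MY_VECTOR and do_my_callback are made-up names, and the macro body lines that
fall between the two hunks above are elided):

    # BUILD_INTERRUPT3(my_callback, MY_VECTOR, do_my_callback) now expands to:
    SYM_FUNC_START(my_callback)
            ASM_CLAC
            pushl   $~(MY_VECTOR)
            SAVE_ALL switch_stacks=1
            # ... (macro body lines not shown in the hunks above) ...
            movl    %esp, %eax
            call    do_my_callback
            jmp     ret_from_intr
    SYM_FUNC_END(my_callback)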