x86/entry/32: Remove unused resume_userspace label
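
This blobdiff tracks arch/x86/entry/entry_64.S across the x86 asm-annotations rework: the old ENTRY/END/ENDPROC/GLOBAL markers are replaced by the SYM_* family from include/linux/linkage.h (see Documentation/asm-annotations.rst). As a rough sketch of what the new macros boil down to on x86 -- simplified here, eliding the exact alignment and ASM_NL glue, so treat it as an approximation rather than the literal macro bodies:

	/* SYM_FUNC_START(name) approximately expands to: */
	.globl	name
	.p2align 4, 0x90		/* x86 __ALIGN */
name:
	/* ... function body ... */
	/* and SYM_FUNC_END(name) to: */
	.type	name, @function		/* STT_FUNC: a C-callable function */
	.size	name, . - name

SYM_CODE_START/SYM_CODE_END have the same shape but leave the symbol type as STT_NOTYPE, marking entry code that must not be called from C; SYM_CODE_START_LOCAL additionally drops the .globl, keeping the symbol file-local; and SYM_INNER_LABEL(name, SYM_L_GLOBAL) replaces the old GLOBAL(name) for labels that live inside another symbol's body.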
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index db43526cecfaff6e87d933157f4c1c93ca4f5a73..d58c0123945736d2218daa6ec83a689eab004163 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -15,7 +15,7 @@
  *                     at the top of the kernel process stack.
  *
  * Some macro usage:
- * - ENTRY/END:                Define functions in the symbol table.
+ * - SYM_FUNC_START/END: Define functions in the symbol table.
  * - TRACE_IRQS_*:      Trace hardirq state for lock debugging.
  * - idtentry:         Define exception entry points.
 */
@@ -45,11 +45,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
        UNWIND_HINT_EMPTY
        swapgs
        sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
        UNWIND_HINT_EMPTY
        /*
         * Interrupts are off on entry.
@@ -162,7 +162,7 @@ ENTRY(entry_SYSCALL_64)
        pushq   %r11                                    /* pt_regs->flags */
        pushq   $__USER_CS                              /* pt_regs->cs */
        pushq   %rcx                                    /* pt_regs->ip */
-GLOBAL(entry_SYSCALL_64_after_hwframe)
+SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
        pushq   %rax                                    /* pt_regs->orig_ax */
 
        PUSH_AND_CLEAR_REGS rax=$-ENOSYS
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
        popq    %rdi
        popq    %rsp
        USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
        UNWIND_HINT_FUNC
        /*
         * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
        popq    %rbp
 
        jmp     __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
        UNWIND_HINT_EMPTY
        movq    %rax, %rdi
        call    schedule_tail                   /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
         */
        movq    $0, RAX(%rsp)
        jmp     2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
        .align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
        .align  8
        vector=vector+1
     .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
        .align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
     vector=FIRST_SYSTEM_VECTOR
     .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
        UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
        .align  8
        vector=vector+1
     .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address                                    |
  * +----------------------------------------------------+
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
        UNWIND_HINT_FUNC
        ASM_CLAC
        cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
        TRACE_IRQS_OFF
 
        ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
 
 
@@ -589,18 +589,18 @@ _ASM_NOKPROBE(interrupt_entry)
  * The interrupt stubs push (~vector+0x80) onto the stack and
  * then jump to common_spurious/interrupt.
  */
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
        addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
        call    interrupt_entry
        UNWIND_HINT_REGS indirect=1
        call    smp_spurious_interrupt          /* rdi points to pt_regs */
        jmp     ret_from_intr
-END(common_spurious)
+SYM_CODE_END(common_spurious)
 _ASM_NOKPROBE(common_spurious)
 
 /* common_interrupt is a hotpath. Align it */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
        addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
        call    interrupt_entry
        UNWIND_HINT_REGS indirect=1
@@ -621,7 +621,7 @@ ret_from_intr:
        call    prepare_exit_to_usermode
        TRACE_IRQS_IRETQ
 
-GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 #ifdef CONFIG_DEBUG_ENTRY
        /* Assert that pt_regs indicates user mode. */
        testb   $3, CS(%rsp)
@@ -679,7 +679,7 @@ retint_kernel:
         */
        TRACE_IRQS_IRETQ
 
-GLOBAL(restore_regs_and_return_to_kernel)
+SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
 #ifdef CONFIG_DEBUG_ENTRY
        /* Assert that pt_regs indicates kernel mode. */
        testb   $3, CS(%rsp)
@@ -695,7 +695,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
         */
        INTERRUPT_RETURN
 
-ENTRY(native_iret)
+SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
        UNWIND_HINT_IRET_REGS
        /*
         * Are we returning to a stack segment from the LDT?  Note: in
@@ -706,8 +706,7 @@ ENTRY(native_iret)
        jnz     native_irq_return_ldt
 #endif
 
-.global native_irq_return_iret
-native_irq_return_iret:
+SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
        /*
         * This may fault.  Non-paranoid faults on return to userspace are
         * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
@@ -789,14 +788,14 @@ native_irq_return_ldt:
         */
        jmp     native_irq_return_iret
 #endif
-END(common_interrupt)
+SYM_CODE_END(common_interrupt)
 _ASM_NOKPROBE(common_interrupt)
 
 /*
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
        UNWIND_HINT_IRET_REGS
        pushq   $~(\num)
 .Lcommon_\sym:
@@ -804,7 +803,7 @@ ENTRY(\sym)
        UNWIND_HINT_REGS indirect=1
        call    \do_sym /* rdi points to pt_regs */
        jmp     ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
 
@@ -969,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR                       irq_work_interrupt              smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
        UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
        /* Sanity check */
@@ -1019,7 +1018,7 @@ ENTRY(\sym)
        .endif
 
 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error                  do_divide_error                 has_error_code=0
@@ -1041,7 +1040,7 @@ idtentry simd_coprocessor_error           do_simd_coprocessor_error       has_error_code=0
         * Reload gs selector with exception handling
         * edi:  new selector
         */
-ENTRY(native_load_gs_index)
+SYM_FUNC_START(native_load_gs_index)
        FRAME_BEGIN
        pushfq
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
@@ -1055,7 +1054,7 @@ ENTRY(native_load_gs_index)
        popfq
        FRAME_END
        ret
-ENDPROC(native_load_gs_index)
+SYM_FUNC_END(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 
        _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
@@ -1076,7 +1075,7 @@ SYM_CODE_END(.Lbad_gs)
        .previous
 
 /* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(do_softirq_own_stack)
+SYM_FUNC_START(do_softirq_own_stack)
        pushq   %rbp
        mov     %rsp, %rbp
        ENTER_IRQ_STACK regs=0 old_rsp=%r11
@@ -1084,7 +1083,7 @@ ENTRY(do_softirq_own_stack)
        LEAVE_IRQ_STACK regs=0
        leaveq
        ret
-ENDPROC(do_softirq_own_stack)
+SYM_FUNC_END(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN_PV
 idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -1102,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * existing activation in its critical region -- if so, we pop the current
  * activation and restart the handler using the previous one.
  */
-ENTRY(xen_do_hypervisor_callback)              /* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct pt_regs *) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
 
 /*
  * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
@@ -1120,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback)         /* do_hypervisor_callback(struct *pt_regs) */
        call    xen_maybe_preempt_hcall
 #endif
        jmp     error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1135,7 +1135,7 @@ END(xen_do_hypervisor_callback)
  * We distinguish between categories by comparing each saved segment register
  * with its current contents: any discrepancy means we are in category 1.
  */
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
        UNWIND_HINT_EMPTY
        movl    %ds, %ecx
        cmpw    %cx, 0x10(%rsp)
@@ -1165,7 +1165,7 @@ ENTRY(xen_failsafe_callback)
        PUSH_AND_CLEAR_REGS
        ENCODE_FRAME_POINTER
        jmp     error_exit
-END(xen_failsafe_callback)
+SYM_CODE_END(xen_failsafe_callback)
 #endif /* CONFIG_XEN_PV */
 
 #ifdef CONFIG_XEN_PVHVM
@@ -1215,7 +1215,7 @@ idtentry machine_check            do_mce                  has_error_code=0        paranoid=1
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
        UNWIND_HINT_FUNC
        cld
        PUSH_AND_CLEAR_REGS save_ret=1
@@ -1249,7 +1249,7 @@ ENTRY(paranoid_entry)
        FENCE_SWAPGS_KERNEL_ENTRY
 
        ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
 
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1263,7 +1263,7 @@ END(paranoid_entry)
  *
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
        UNWIND_HINT_REGS
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF_DEBUG
@@ -1280,12 +1280,12 @@ ENTRY(paranoid_exit)
        RESTORE_CR3     scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
        jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
  */
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
        UNWIND_HINT_FUNC
        cld
        PUSH_AND_CLEAR_REGS save_ret=1
@@ -1365,16 +1365,16 @@ ENTRY(error_entry)
        call    fixup_bad_iret
        mov     %rax, %rsp
        jmp     .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
 
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
        UNWIND_HINT_REGS
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        testb   $3, CS(%rsp)
        jz      retint_kernel
        jmp     .Lretint_user
-END(error_exit)
+SYM_CODE_END(error_exit)
 
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
@@ -1384,7 +1384,7 @@ END(error_exit)
  *     %r14: Used to save/restore the CR3 of the interrupted context
  *           when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
  */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
        UNWIND_HINT_IRET_REGS
 
        /*
@@ -1719,21 +1719,21 @@ nmi_restore:
         * about espfix64 on the way back to kernel mode.
         */
        iretq
-END(nmi)
+SYM_CODE_END(nmi)
 
 #ifndef CONFIG_IA32_EMULATION
 /*
  * This handles SYSCALL from 32-bit code.  There is no way to program
  * MSRs to fully disable 32-bit SYSCALL.
  */
-ENTRY(ignore_sysret)
+SYM_CODE_START(ignore_sysret)
        UNWIND_HINT_EMPTY
        mov     $-ENOSYS, %eax
        sysret
-END(ignore_sysret)
+SYM_CODE_END(ignore_sysret)
 #endif
 
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
        UNWIND_HINT_FUNC
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl    %ebp, %ebp
@@ -1743,4 +1743,4 @@ ENTRY(rewind_stack_do_exit)
        UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
 
        call    do_exit
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
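
As a usage sketch under the new scheme (example_stub is a hypothetical symbol, for illustration only -- it is not part of this diff), a file-local, non-C-callable stub would now be written as:

SYM_CODE_START_LOCAL(example_stub)	/* hypothetical helper, illustration only */
	UNWIND_HINT_FUNC
	movq	$-ENOSYS, %rax
	ret
SYM_CODE_END(example_stub)

By contrast, C-callable helpers such as native_load_gs_index and do_softirq_own_stack move from ENTRY/ENDPROC to SYM_FUNC_START/SYM_FUNC_END, so they are still emitted as proper STT_FUNC symbols with a correct .size -- the metadata that tools such as objtool and livepatch rely on.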