Convert all assembly code that is annotated with END (and not ENDPROC) to
the appropriate new markers, SYM_CODE_START and SYM_CODE_END.
Since the last user of END on x86 is now gone, also make sure that END is
no longer defined there.
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-27-jslaby@suse.cz
* %eax: prev task
* %edx: next task
*/
* %eax: prev task
* %edx: next task
*/
+SYM_CODE_START(__switch_to_asm)
/*
* Save callee-saved registers
* This must match the order in struct inactive_task_frame
/*
* Save callee-saved registers
* This must match the order in struct inactive_task_frame
popl %ebp
jmp __switch_to
popl %ebp
jmp __switch_to
+SYM_CODE_END(__switch_to_asm)
/*
* The unwinder expects the last frame on the stack to always be at the same
/*
* The unwinder expects the last frame on the stack to always be at the same
* ebx: kernel thread func (NULL for user thread)
* edi: kernel thread arg
*/
* ebx: kernel thread func (NULL for user thread)
* edi: kernel thread arg
*/
+SYM_CODE_START(ret_from_fork)
call schedule_tail_wrapper
testl %ebx, %ebx
call schedule_tail_wrapper
testl %ebx, %ebx
*/
movl $0, PT_EAX(%esp)
jmp 2b
*/
movl $0, PT_EAX(%esp)
jmp 2b
+SYM_CODE_END(ret_from_fork)
/*
* Return to user mode is not as complex as all this looks,
/*
* Return to user mode is not as complex as all this looks,
* We pack 1 stub into every 8-byte block.
*/
.align 8
* We pack 1 stub into every 8-byte block.
*/
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
jmp common_interrupt
.align 8
.endr
jmp common_interrupt
.align 8
.endr
+SYM_CODE_END(irq_entries_start)
#ifdef CONFIG_X86_LOCAL_APIC
.align 8
#ifdef CONFIG_X86_LOCAL_APIC
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
vector=FIRST_SYSTEM_VECTOR
.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
jmp common_spurious
.align 8
.endr
jmp common_spurious
.align 8
.endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
SYM_CODE_START_LOCAL(common_spurious)
ASM_CLAC
SYM_CODE_START_LOCAL(common_spurious)
ASM_CLAC
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
ASM_CLAC
pushl $0
pushl $do_coprocessor_error
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_coprocessor_error
jmp common_exception
+SYM_CODE_END(coprocessor_error)
-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
ASM_CLAC
pushl $0
#ifdef CONFIG_X86_INVD_BUG
ASM_CLAC
pushl $0
#ifdef CONFIG_X86_INVD_BUG
pushl $do_simd_coprocessor_error
#endif
jmp common_exception
pushl $do_simd_coprocessor_error
#endif
jmp common_exception
-END(simd_coprocessor_error)
+SYM_CODE_END(simd_coprocessor_error)
-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
ASM_CLAC
pushl $-1 # mark this as an int
pushl $do_device_not_available
jmp common_exception
ASM_CLAC
pushl $-1 # mark this as an int
pushl $do_device_not_available
jmp common_exception
-END(device_not_available)
+SYM_CODE_END(device_not_available)
+SYM_CODE_START(native_iret)
iret
_ASM_EXTABLE(native_iret, iret_exc)
iret
_ASM_EXTABLE(native_iret, iret_exc)
+SYM_CODE_END(native_iret)
+SYM_CODE_START(overflow)
ASM_CLAC
pushl $0
pushl $do_overflow
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_overflow
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_bounds
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_bounds
jmp common_exception
+SYM_CODE_START(invalid_op)
ASM_CLAC
pushl $0
pushl $do_invalid_op
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_invalid_op
jmp common_exception
+SYM_CODE_END(invalid_op)
-ENTRY(coprocessor_segment_overrun)
+SYM_CODE_START(coprocessor_segment_overrun)
ASM_CLAC
pushl $0
pushl $do_coprocessor_segment_overrun
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_coprocessor_segment_overrun
jmp common_exception
-END(coprocessor_segment_overrun)
+SYM_CODE_END(coprocessor_segment_overrun)
+SYM_CODE_START(invalid_TSS)
ASM_CLAC
pushl $do_invalid_TSS
jmp common_exception
ASM_CLAC
pushl $do_invalid_TSS
jmp common_exception
+SYM_CODE_END(invalid_TSS)
-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
ASM_CLAC
pushl $do_segment_not_present
jmp common_exception
ASM_CLAC
pushl $do_segment_not_present
jmp common_exception
-END(segment_not_present)
+SYM_CODE_END(segment_not_present)
+SYM_CODE_START(stack_segment)
ASM_CLAC
pushl $do_stack_segment
jmp common_exception
ASM_CLAC
pushl $do_stack_segment
jmp common_exception
+SYM_CODE_END(stack_segment)
+SYM_CODE_START(alignment_check)
ASM_CLAC
pushl $do_alignment_check
jmp common_exception
ASM_CLAC
pushl $do_alignment_check
jmp common_exception
+SYM_CODE_END(alignment_check)
+SYM_CODE_START(divide_error)
ASM_CLAC
pushl $0 # no error code
pushl $do_divide_error
jmp common_exception
ASM_CLAC
pushl $0 # no error code
pushl $do_divide_error
jmp common_exception
+SYM_CODE_END(divide_error)
+SYM_CODE_START(machine_check)
ASM_CLAC
pushl $0
pushl machine_check_vector
jmp common_exception
ASM_CLAC
pushl $0
pushl machine_check_vector
jmp common_exception
+SYM_CODE_END(machine_check)
-ENTRY(spurious_interrupt_bug)
+SYM_CODE_START(spurious_interrupt_bug)
ASM_CLAC
pushl $0
pushl $do_spurious_interrupt_bug
jmp common_exception
ASM_CLAC
pushl $0
pushl $do_spurious_interrupt_bug
jmp common_exception
-END(spurious_interrupt_bug)
+SYM_CODE_END(spurious_interrupt_bug)
#ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback)
#ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback)
#endif /* CONFIG_HYPERV */
#endif /* CONFIG_HYPERV */
+SYM_CODE_START(page_fault)
ASM_CLAC
pushl $do_page_fault
jmp common_exception_read_cr2
ASM_CLAC
pushl $do_page_fault
jmp common_exception_read_cr2
+SYM_CODE_END(page_fault)
SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
/* the function address is in %gs's slot on the stack */
SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
/* the function address is in %gs's slot on the stack */
jmp ret_from_exception
SYM_CODE_END(common_exception)
jmp ret_from_exception
SYM_CODE_END(common_exception)
/*
* Entry from sysenter is now handled in common_exception
*/
/*
* Entry from sysenter is now handled in common_exception
*/
pushl $-1 # mark this as an int
pushl $do_debug
jmp common_exception
pushl $-1 # mark this as an int
pushl $do_debug
jmp common_exception
/*
* NMI is doubly nasty. It can happen on the first instruction of
/*
* NMI is doubly nasty. It can happen on the first instruction of
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
lss 12+4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
lss 12+4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
ASM_CLAC
pushl $-1 # mark this as an int
ASM_CLAC
pushl $-1 # mark this as an int
movl %esp, %eax # pt_regs pointer
call do_int3
jmp ret_from_exception
movl %esp, %eax # pt_regs pointer
call do_int3
jmp ret_from_exception
-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
pushl $do_general_protection
jmp common_exception
pushl $do_general_protection
jmp common_exception
+SYM_CODE_END(general_protection)
+SYM_CODE_START(async_page_fault)
ASM_CLAC
pushl $do_async_page_fault
jmp common_exception_read_cr2
ASM_CLAC
pushl $do_async_page_fault
jmp common_exception_read_cr2
+SYM_CODE_END(async_page_fault)
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
ret
SYM_FUNC_END(function_hook)
ret
SYM_FUNC_END(function_hook)
+SYM_CODE_START(ftrace_caller)
#ifdef CONFIG_FRAME_POINTER
/*
#ifdef CONFIG_FRAME_POINTER
/*
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
ret
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
ret
+SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_regs_caller)
/*
SYM_CODE_START(ftrace_regs_caller)
/*
SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
pushl %eax
pushl %ecx
pushl %edx
pushl %eax
pushl %ecx
pushl %edx
-END(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
.globl return_to_handler
return_to_handler:
SYM_FUNC_START_WEAK(name)
#endif
SYM_FUNC_START_WEAK(name)
#endif
#ifndef END
/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
#ifndef END
/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
#ifndef CONFIG_X86_64
/* If symbol 'name' is treated as a subroutine (gets called, and returns)
#ifndef CONFIG_X86_64
/* If symbol 'name' is treated as a subroutine (gets called, and returns)