/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits of width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.
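
 Illustrative sketch of the [**] rule (hypothetical C declarations, included
 only to make the register assignment concrete):

	struct p2 { long a, b; };	// 128 bits: returned in rax:rdx
	struct p3 { long a, b, c; };	// 3 words: returned via memory

	struct p2 f(void);		// caller reads a from rax, b from rdx
	struct p3 g(int x);		// caller passes &result in rdi;
					// x shifts from rdi to rsi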

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means semantics
      similar to 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
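
/*
 * Illustrative sketch of -mregparm=3 (hypothetical prototype, included
 * only as an example of the table above):
 *
 *	long h(long a, long b, long c, long d);
 *	// a -> eax, b -> edx, c -> ecx, d -> pushed on the stack
 */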

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
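
/*
 * Usage sketch (example only): once a full pt_regs has been built at the
 * top of the stack, entry code can address individual saved registers by
 * these offsets:
 *
 *	movq	ORIG_RAX(%rsp), %rsi	# syscall number / error code
 *	movq	RIP(%rsp), %rdi		# user-space return address
 */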

.macro ALLOC_PT_GPREGS_ON_STACK
	/* Make room for the 15 general-purpose registers of pt_regs: */
	addq	$-(15*8), %rsp
.endm

.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	.if \r11
	movq %r11, 6*8+\offset(%rsp)
	.endif
	.if \r8910
	movq %r10, 7*8+\offset(%rsp)
	movq %r9, 8*8+\offset(%rsp)
	movq %r8, 9*8+\offset(%rsp)
	.endif
	.if \rax
	movq %rax, 10*8+\offset(%rsp)
	.endif
	.if \rcx
	movq %rcx, 11*8+\offset(%rsp)
	.endif
	movq %rdx, 12*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdi, 14*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro SAVE_C_REGS offset=0
	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_R891011
	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RCX_R891011
	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
.endm

.macro SAVE_EXTRA_REGS offset=0
	movq %r15, 0*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset
.endm

.macro POP_EXTRA_REGS
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
.endm
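
/*
 * Usage sketch (example only, not a real kernel entry path): how the save
 * macros pair up when entry code needs a complete pt_regs:
 *
 *	ALLOC_PT_GPREGS_ON_STACK	# reserve 15*8 bytes below orig_rax
 *	SAVE_C_REGS			# rdi ... r11
 *	SAVE_EXTRA_REGS			# rbx, rbp, r12-r15
 *	...				# C code can safely run here
 *	POP_EXTRA_REGS			# restore the callee-saved half
 */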

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	/* The '1 +' sets the LSB of the encoded pt_regs address: */
	leaq 1+\ptregs_offset(%rsp), %rbp
#endif
.endm
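
/*
 * Decoding sketch (hypothetical C, mirroring what an unwinder that
 * understands this encoding would do with a saved frame pointer bp):
 *
 *	if (bp & 0x1)
 *		regs = (struct pt_regs *)(bp & ~0x1UL);
 */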

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_SWITCH_PGTABLES_MASK	(1<<PAGE_SHIFT)
#define PTI_SWITCH_MASK		(PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))
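
/*
 * Worked example (assuming PAGE_SHIFT == 12 and X86_CR3_PTI_SWITCH_BIT == 11):
 * PTI_SWITCH_MASK is then 0x1000 | 0x800 = 0x1800, so a single
 * "orq $(PTI_SWITCH_MASK), reg" selects both the user half of the 8k PGD
 * (bit 12) and the user PCID (bit 11) in one operation.
 */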

.macro SET_NOFLUSH_BIT reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_SWITCH_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
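
/*
 * Usage sketch (example only): entry code runs this before touching kernel
 * mappings that are absent from the user page tables, using whichever
 * general-purpose register is free at that point:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 */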

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD and ASID to the user version */
	orq	$(PTI_SWITCH_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
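
/*
 * Usage sketch (example only): exit paths with no second free register can
 * use the stack variant, which preserves %rax around the switch:
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 */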

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Is the "switch mask" all zero?  That means that both of
	 * these are zero:
	 *
	 *	1. The user/kernel PCID bit, and
	 *	2. The user/kernel "bit" that points CR3 to the
	 *	   bottom half of the 8k PGD
	 *
	 * That indicates a kernel CR3 value, not a user CR3.
	 */
	testq	$(PTI_SWITCH_MASK), \scratch_reg
	jz	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$X86_CR3_PTI_SWITCH_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
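
/*
 * Usage sketch (example only, register choice is illustrative): paranoid
 * entry/exit paths pair these two, because the CR3 on entry may be either
 * a kernel or a user value:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */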

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif /* CONFIG_PAGE_TABLE_ISOLATION */

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
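
/*
 * Effect sketch (example only): with jump labels available, the call site
 * initially compiles to a jump over the call (def=0, so the key defaults
 * to false); enabling the context_tracking_enabled key patches that jump
 * to a NOP, so 'call enter_from_user_mode' then executes.
 */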