/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

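/*
 * Note that both variants evaluate their argument exactly once and yield a
 * value: the !CONFIG_X86_DEBUG_FPU version always yields 0, so error paths
 * guarded by it compile away while the (possibly side-effectful) expression
 * is still evaluated.  Illustrative pattern only (hypothetical caller):
 *
 *	if (WARN_ON_FPU(err))
 *		return;
 */
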
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

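/*
 * static_cpu_has() is patched at boot, so these helpers let save/restore
 * paths branch on the FPU features essentially for free.  A minimal sketch
 * of the usual dispatch pattern (illustration only; the real version is
 * copy_fpregs_to_fpstate() further down):
 *
 *	if (use_xsave())
 *		copy_xregs_to_kernel(&fpu->state.xsave);
 *	else if (use_fxsr())
 *		copy_fxregs_to_kernel(fpu);
 *	else
 *		asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
 */
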
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
        fx->mxcsr = MXCSR_DEFAULT;
}

extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)                               \
({                                                                      \
        int err;                                                        \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:" #insn "\n\t"                                  \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3: movl $-1,%[err]\n"                             \
                     "   jmp  2b\n"                                     \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})

#define check_insn(insn, output, input...)                              \
({                                                                      \
        int err;                                                        \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3: movl $-1,%[err]\n"                             \
                     "   jmp  2b\n"                                     \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
        err;                                                            \
})

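/*
 * Both macros expand to a statement expression whose value is 'err': 0 if
 * the instruction at label 1 executed, -1 if it faulted and the exception
 * table redirected to the fixup at label 3.  user_insn() additionally
 * brackets the instruction with STAC/CLAC so it may touch user memory when
 * SMAP is enabled.  Hypothetical use, for illustration:
 *
 *	if (check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)))
 *		return -EFAULT;
 */
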
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
        return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

        /* See comment in copy_fxregs_to_kernel() below. */
        return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
        int err;

        if (IS_ENABLED(CONFIG_X86_32)) {
                err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
                err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else {
                /* See comment in copy_fxregs_to_kernel() below. */
                err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
        }

        /* Copying from a kernel buffer to FPU registers should never fail: */
        WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

        /* See comment in copy_fxregs_to_kernel() below. */
        return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
                         "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
        int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

        WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else {
                /* Using "rex64; fxsave %0" is broken because, if the memory
                 * operand uses any extended registers for addressing, a
                 * second REX prefix will be generated (to the assembler,
                 * rex64 followed by semicolon is a separate instruction),
                 * and hence the 64-bitness is lost.
                 *
                 * Using "fxsaveq %0" would be the ideal choice, but is only
                 * supported starting with gas 2.16.
                 *
                 * Using, as a workaround, the properly prefixed form below
                 * isn't accepted by any binutils version so far released,
                 * complaining that the same type of prefix is used twice if
                 * an extended register is needed for addressing (fix
                 * submitted to mainline 2005-11-21).
                 *
                 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
                 *
                 * This, however, we can work around by forcing the compiler
                 * to select an addressing mode that doesn't require extended
                 * registers.
                 */
                asm volatile( "rex64/fxsave (%[fx])"
                             : "=m" (fpu->state.fxsave)
                             : [fx] "R" (&fpu->state.fxsave));
        }
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE           ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT        ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES          ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR          ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS         ".byte " REX_PREFIX "0x0f,0xc7,0x1f"

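/*
 * The final byte is the ModRM byte: mod=00, r/m=111 selects (%rdi)/(%edi) as
 * the memory operand, and the reg field carries the opcode extension, e.g.
 * 0x27 = 00 100 111b = /4 for XSAVE and 0x37 = 00 110 111b = /6 for XSAVEOPT.
 * On 64-bit kernels REX_PREFIX adds a REX.W byte, selecting the 64-bit forms
 * of these instructions.
 */
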
#define XSTATE_OP(op, st, lmask, hmask, err)                            \
        asm volatile("1:" op "\n\t"                                     \
                     "xor %[err], %[err]\n"                             \
                     "2:\n\t"                                           \
                     ".pushsection .fixup,\"ax\"\n\t"                   \
                     "3: movl $-2,%[err]\n\t"                           \
                     "jmp 2b\n\t"                                       \
                     ".popsection\n\t"                                  \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

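/*
 * Callers pass the xsave area pointer via the "D" constraint (%rdi/%edi) and
 * split the 64-bit feature mask into %eax/%edx via "a"/"d", which is what the
 * hand-assembled opcodes above expect.  Minimal usage sketch (this mirrors
 * the *_booting() helpers below):
 *
 *	u64 mask = -1;
 *	u32 lmask = mask;
 *	u32 hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 *	WARN_ON_FPU(err);
 */
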
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)                             \
        asm volatile(ALTERNATIVE_2(XSAVE,                               \
                                   XSAVEOPT, X86_FEATURE_XSAVEOPT,     \
                                   XSAVES,   X86_FEATURE_XSAVES)       \
                     "\n"                                               \
                     "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "4: movl $-2, %[err]\n"                            \
                     "jmp 3b\n"                                         \
                     ".popsection\n"                                    \
                     _ASM_EXTABLE(661b, 4b)                             \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

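/*
 * To a caller, XSTATE_XSAVE() behaves like a single patched instruction:
 * 'err' ends up 0 on success and -2 if the (alternatively selected) save
 * instruction faulted.  Usage sketch, as in copy_xregs_to_kernel() below:
 *
 *	XSTATE_XSAVE(xstate, lmask, hmask, err);
 *	WARN_ON_FPU(err);
 */
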
/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)                          \
        asm volatile(ALTERNATIVE(XRSTOR,                                \
                                 XRSTORS, X86_FEATURE_XSAVES)           \
                     "\n"                                               \
                     "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "4: movl $-2, %[err]\n"                            \
                     "jmp 3b\n"                                         \
                     ".popsection\n"                                    \
                     _ASM_EXTABLE(661b, 4b)                             \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        /* We should never fault when copying from a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        XSTATE_XRESTORE(xstate, lmask, hmask, err);

        /* We should never fault when copying from a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->header, sizeof(buf->header));
        if (unlikely(err))
                return -EFAULT;

        stac();
        XSTATE_OP(XSAVE, buf, -1, -1, err);
        clac();

        return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
        struct xregs_state *xstate = ((__force struct xregs_state *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
        clac();

        return err;
}

/*
 * These must be called with preempt disabled. Returns 'true' if the
 * FPU state is still intact and we can keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state unconditionally,
 * so registers are essentially destroyed. Modern FPU state can be kept
 * in registers, if there are no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
        if (likely(use_xsave())) {
                copy_xregs_to_kernel(&fpu->state.xsave);
                return 1;
        }

        if (likely(use_fxsr())) {
                copy_fxregs_to_kernel(fpu);
                return 1;
        }

        /*
         * Legacy FPU register saving, FNSAVE always clears FPU registers,
         * so we have to mark them inactive:
         */
        asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

        return 0;
}

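/*
 * The return value tells the caller whether the in-register state survived
 * the save: 1 for the XSAVE/FXSAVE paths, 0 after FNSAVE, which destroys the
 * register contents.  Sketch of how a scheduler-side caller reacts (compare
 * switch_fpu_prepare() below):
 *
 *	if (copy_fpregs_to_fpstate(old_fpu))
 *		old_fpu->last_cpu = cpu;	(registers still valid)
 *	else
 *		old_fpu->last_cpu = -1;		(must be restored from memory)
 */
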
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
        if (use_xsave()) {
                copy_kernel_to_xregs(&fpstate->xsave, -1);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
                else
                        copy_kernel_to_fregs(&fpstate->fsave);
        }
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
        /*
         * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
        if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
                        "fildl %P[addr]"        /* set F?P to defined value */
                        : : [addr] "m" (fpstate));
        }

        __copy_kernel_to_fpregs(fpstate);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * on that CPU points to this FPU context.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
        __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
        fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
        return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

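/*
 * Sketch of the invalidation protocol described above, from a hypothetical
 * piece of code that wants to clobber this CPU's FPU registers while
 * preemption is disabled:
 *
 *	__cpu_invalidate_fpregs_state();
 *	... use the FPU registers for something else ...
 *
 * or, when modifying a stopped task's in-memory FPU state:
 *
 *	__fpu_invalidate_fpregs_state(&tsk->thread.fpu);
 */
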
/*
 * These generally need preemption protection to work,
 * do try to avoid using these on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
        WARN_ON_FPU(!fpu->fpregs_active);

        fpu->fpregs_active = 0;
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
        WARN_ON_FPU(fpu->fpregs_active);

        fpu->fpregs_active = 1;
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
        return current->thread.fpu.fpregs_active;
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (old_fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;

                /* But leave fpu_fpregs_owner_ctx! */
                old_fpu->fpregs_active = 0;
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else {
                old_fpu->last_cpu = -1;
        }
}

/*
 * Misc helper functions:
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
                       new_fpu->fpstate_active;

        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
                        copy_kernel_to_fpregs(&new_fpu->state);
                fpregs_activate(new_fpu);
        }
}

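/*
 * The two stages bracket the actual task switch.  A simplified, hypothetical
 * __switch_to() would do:
 *
 *	switch_fpu_prepare(&prev->thread.fpu, cpu);
 *	... switch stacks, segments, etc. ...
 *	switch_fpu_finish(&next->thread.fpu, cpu);
 */
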
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        preempt_disable();
        if (!fpregs_active())
                fpregs_activate(fpu);
        preempt_enable();
}

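/*
 * Illustrative call site (simplified): make sure the registers are live and
 * owned by the current task immediately before reloading them from its
 * fpstate:
 *
 *	user_fpu_begin();
 *	copy_kernel_to_fpregs(&fpu->state);
 */
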
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK       0x00000000

static inline u64 xgetbv(u32 index)
{
        u32 eax, edx;

        asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
                     : "=a" (eax), "=d" (edx)
                     : "c" (index));
        return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
        u32 eax = value;
        u32 edx = value >> 32;

        asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
                     : : "a" (eax), "d" (edx), "c" (index));
}

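/*
 * Illustrative use: XCR0 (index XCR_XFEATURE_ENABLED_MASK) holds the user
 * xstate features the OS has enabled; the XFEATURE_MASK_* constants come
 * from <asm/fpu/types.h>:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | XFEATURE_MASK_SSE);
 */
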
#endif /* _ASM_X86_FPU_INTERNAL_H */