/* linux.git: arch/x86/kernel/fpu/init.c */
1 /*
2  * x86 FPU boot time init code:
3  */
4 #include <asm/fpu/internal.h>
5 #include <asm/tlbflush.h>
6 #include <asm/setup.h>
7 #include <asm/cmdline.h>
8
9 #include <linux/sched.h>
10 #include <linux/init.h>
11
12 /*
13  * Initialize the registers found in all CPUs, CR0 and CR4:
14  */
15 static void fpu__init_cpu_generic(void)
16 {
17         unsigned long cr0;
18         unsigned long cr4_mask = 0;
19
20         if (boot_cpu_has(X86_FEATURE_FXSR))
21                 cr4_mask |= X86_CR4_OSFXSR;
22         if (boot_cpu_has(X86_FEATURE_XMM))
23                 cr4_mask |= X86_CR4_OSXMMEXCPT;
24         if (cr4_mask)
25                 cr4_set_bits(cr4_mask);
26
27         cr0 = read_cr0();
28         cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
29         if (!boot_cpu_has(X86_FEATURE_FPU))
30                 cr0 |= X86_CR0_EM;
31         write_cr0(cr0);
32
33         /* Flush out any pending x87 state: */
34 #ifdef CONFIG_MATH_EMULATION
35         if (!boot_cpu_has(X86_FEATURE_FPU))
36                 fpstate_init_soft(&current->thread.fpu.state.soft);
37         else
38 #endif
39                 asm volatile ("fninit");
40 }
41
42 /*
43  * Enable all supported FPU features. Called when a CPU is brought online:
44  */
45 void fpu__init_cpu(void)
46 {
47         fpu__init_cpu_generic();
48         fpu__init_cpu_xstate();
49 }
50
51 static bool fpu__probe_without_cpuid(void)
52 {
53         unsigned long cr0;
54         u16 fsw, fcw;
55
56         fsw = fcw = 0xffff;
57
58         cr0 = read_cr0();
59         cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
60         write_cr0(cr0);
61
62         asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
63
64         pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);
65
66         return fsw == 0 && (fcw & 0x103f) == 0x003f;
67 }
68
69 static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
70 {
71         if (!boot_cpu_has(X86_FEATURE_CPUID) &&
72             !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
73                 if (fpu__probe_without_cpuid())
74                         setup_force_cpu_cap(X86_FEATURE_FPU);
75                 else
76                         setup_clear_cpu_cap(X86_FEATURE_FPU);
77         }
78
79 #ifndef CONFIG_MATH_EMULATION
80         if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
81                 pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
82                 for (;;)
83                         asm volatile("hlt");
84         }
85 #endif
86 }
87
88 /*
89  * Boot time FPU feature detection code:
90  */
91 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
92
93 static void __init fpu__init_system_mxcsr(void)
94 {
95         unsigned int mask = 0;
96
97         if (boot_cpu_has(X86_FEATURE_FXSR)) {
98                 /* Static because GCC does not get 16-byte stack alignment right: */
99                 static struct fxregs_state fxregs __initdata;
100
101                 asm volatile("fxsave %0" : "+m" (fxregs));
102
103                 mask = fxregs.mxcsr_mask;
104
105                 /*
106                  * If zero then use the default features mask,
107                  * which has all features set, except the
108                  * denormals-are-zero feature bit:
109                  */
110                 if (mask == 0)
111                         mask = 0x0000ffbf;
112         }
113         mxcsr_feature_mask &= mask;
114 }
115
116 /*
117  * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
118  */
119 static void __init fpu__init_system_generic(void)
120 {
121         /*
122          * Set up the legacy init FPU context. (xstate init might overwrite this
123          * with a more modern format, if the CPU supports it.)
124          */
125         fpstate_init(&init_fpstate);
126
127         fpu__init_system_mxcsr();
128 }
129
130 /*
131  * Size of the FPU context state. All tasks in the system use the
132  * same context size, regardless of what portion they use.
133  * This is inherent to the XSAVE architecture which puts all state
134  * components into a single, continuous memory block:
135  */
136 unsigned int fpu_kernel_xstate_size;
137 EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
138
139 /* Get alignment of the TYPE. */
140 #define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
141
142 /*
143  * Enforce that 'MEMBER' is the last field of 'TYPE'.
144  *
145  * Align the computed size with alignment of the TYPE,
146  * because that's how C aligns structs.
147  */
148 #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
149         BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
150                                            TYPE_ALIGN(TYPE)))
151
152 /*
153  * We append the 'struct fpu' to the task_struct:
154  */
155 static void __init fpu__init_task_struct_size(void)
156 {
157         int task_size = sizeof(struct task_struct);
158
159         /*
160          * Subtract off the static size of the register state.
161          * It potentially has a bunch of padding.
162          */
163         task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
164
165         /*
166          * Add back the dynamically-calculated register state
167          * size.
168          */
169         task_size += fpu_kernel_xstate_size;
170
171         /*
172          * We dynamically size 'struct fpu', so we require that
173          * it be at the end of 'thread_struct' and that
174          * 'thread_struct' be at the end of 'task_struct'.  If
175          * you hit a compile error here, check the structure to
176          * see if something got added to the end.
177          */
178         CHECK_MEMBER_AT_END_OF(struct fpu, state);
179         CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
180         CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
181
182         arch_task_struct_size = task_size;
183 }
184
185 /*
186  * Set up the user and kernel xstate sizes based on the legacy FPU context size.
187  *
188  * We set this up first, and later it will be overwritten by
189  * fpu__init_system_xstate() if the CPU knows about xstates.
190  */
191 static void __init fpu__init_system_xstate_size_legacy(void)
192 {
193         static int on_boot_cpu __initdata = 1;
194
195         WARN_ON_FPU(!on_boot_cpu);
196         on_boot_cpu = 0;
197
198         /*
199          * Note that xstate sizes might be overwritten later during
200          * fpu__init_system_xstate().
201          */
202
203         if (!boot_cpu_has(X86_FEATURE_FPU)) {
204                 /*
205                  * Disable xsave as we do not support it if i387
206                  * emulation is enabled.
207                  */
208                 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
209                 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
210                 fpu_kernel_xstate_size = sizeof(struct swregs_state);
211         } else {
212                 if (boot_cpu_has(X86_FEATURE_FXSR))
213                         fpu_kernel_xstate_size =
214                                 sizeof(struct fxregs_state);
215                 else
216                         fpu_kernel_xstate_size =
217                                 sizeof(struct fregs_state);
218         }
219
220         fpu_user_xstate_size = fpu_kernel_xstate_size;
221 }
222
223 /*
224  * Find supported xfeatures based on cpu features and command-line input.
225  * This must be called after fpu__init_parse_early_param() is called and
226  * xfeatures_mask is enumerated.
227  */
228 u64 __init fpu__get_supported_xfeatures_mask(void)
229 {
230         return XCNTXT_MASK;
231 }
232
233 /* Legacy code to initialize eager fpu mode. */
234 static void __init fpu__init_system_ctx_switch(void)
235 {
236         static bool on_boot_cpu __initdata = 1;
237
238         WARN_ON_FPU(!on_boot_cpu);
239         on_boot_cpu = 0;
240
241         WARN_ON_FPU(current->thread.fpu.fpstate_active);
242 }
243
244 /*
245  * We parse fpu parameters early because fpu__init_system() is executed
246  * before parse_early_param().
247  */
248 static void __init fpu__init_parse_early_param(void)
249 {
250         if (cmdline_find_option_bool(boot_command_line, "no387"))
251                 setup_clear_cpu_cap(X86_FEATURE_FPU);
252
253         if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
254                 setup_clear_cpu_cap(X86_FEATURE_FXSR);
255                 setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
256                 setup_clear_cpu_cap(X86_FEATURE_XMM);
257         }
258
259         if (cmdline_find_option_bool(boot_command_line, "noxsave"))
260                 fpu__xstate_clear_all_cpu_caps();
261
262         if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
263                 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
264
265         if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
266                 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
267 }
268
269 /*
270  * Called on the boot CPU once per system bootup, to set up the initial
271  * FPU state that is later cloned into all processes:
272  */
273 void __init fpu__init_system(struct cpuinfo_x86 *c)
274 {
275         fpu__init_parse_early_param();
276         fpu__init_system_early_generic(c);
277
278         /*
279          * The FPU has to be operational for some of the
280          * later FPU init activities:
281          */
282         fpu__init_cpu();
283
284         fpu__init_system_generic();
285         fpu__init_system_xstate_size_legacy();
286         fpu__init_system_xstate();
287         fpu__init_task_struct_size();
288
289         fpu__init_system_ctx_switch();
290 }