// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);

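/*
 * Called once from start_kernel() early in boot: identify the boot CPU and
 * select the Spectre v2 mitigation before the alternatives are patched in.
 */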
void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed-size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};
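
/*
 * These commands are selected with the spectre_v2= boot parameter (or with
 * nospectre_v2 for SPECTRE_V2_CMD_NONE); the accepted strings are listed in
 * mitigation_options[] and parsed by spectre_v2_parse_cmdline() below.
 */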

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

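/*
 * Called when a module is loaded, with has_retpoline indicating whether the
 * module was built with retpoline support. A module built without it can
 * defeat the kernel's retpoline mitigation, so warn and remember this for
 * the sysfs report.
 */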
bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

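/*
 * True when the kernel was built by a retpoline-capable compiler (RETPOLINE
 * is defined); this selects the "full" rather than "minimal" retpoline modes
 * in spectre_v2_select_mitigation().
 */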
static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

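/*
 * Exact-length match of a (not necessarily NUL-terminated) command line
 * argument against an option string. For example, arg = "retpoline,amd"
 * with arglen = 13 does not match opt = "retpoline", while arg = "retpoline"
 * with arglen = 9 does.
 */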
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

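/*
 * Typical usage: booting with "spectre_v2=retpoline,generic" forces the
 * generic retpoline, while "nospectre_v2" (or "spectre_v2=off") disables
 * the mitigation entirely.
 */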
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                          sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        /*
                         * No table entry matched; print the raw argument
                         * (indexing mitigation_options[i] here would read
                         * past the end of the array).
                         */
                        pr_err("unknown option (%s). Switching to AUTO select\n",
                               arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or
         * AUTO, there is nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!\n");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor KPTI is available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this,
         * fill the entire RSB, even when using IBRS.
         *
         * Skylake-era CPUs have a separate issue with *underflow* of the
         * RSB, where they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or is deactivated in favour of retpolines, the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Filling RSB on context switch\n");
        }

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Enabling Indirect Branch Prediction Barrier\n");
        }
}

#undef pr_fmt

#ifdef CONFIG_SYSFS
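/*
 * The cpu_show_*() handlers below back the files under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1 and
 * spectre_v2) and report the current mitigation state.
 */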
ssize_t cpu_show_meltdown(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_PTI))
                return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                       spectre_v2_module_string());
}
#endif

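/*
 * Issue an Indirect Branch Prediction Barrier: writing PRED_CMD_IBPB to the
 * IA32_PRED_CMD MSR ensures that code executed before the barrier cannot
 * control the predicted targets of indirect branches executed after it.
 */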
void __ibp_barrier(void)
{
        __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
}
EXPORT_SYMBOL_GPL(__ibp_barrier);