// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *  - Rafael R. Reilova (moved everything from head.S),
 *    <rreilova@ececs.uc.edu>
 *  - Channing Corn (tests & fixes),
 *  - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
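
/*
 * For illustration only, a sketch of how these keys are consumed in the
 * hot paths. The real helpers live outside this file (in
 * <asm/nospec-branch.h>), so the names and details below are indicative,
 * not authoritative:
 *
 *	static inline void mds_user_clear_cpu_buffers(void)
 *	{
 *		if (static_branch_likely(&mds_user_clear))
 *			mds_clear_cpu_buffers();	// VERW based flush
 *	}
 *
 * Using static keys keeps the disabled case free of a memory load and a
 * conditional branch on the return-to-user and idle paths.
 */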
void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

        mds_select_mitigation();

        arch_smt_update();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        set_memory_4k((unsigned long)__va(0), 1);
#endif
}
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
#define pr_fmt(fmt)     "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
        [MDS_MITIGATION_OFF]    = "Vulnerable",
        [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
        [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
                mds_mitigation = MDS_MITIGATION_OFF;
                return;
        }

        if (mds_mitigation == MDS_MITIGATION_FULL) {
                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                        mds_mitigation = MDS_MITIGATION_VMWERV;

                static_branch_enable(&mds_user_clear);

                if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
                    (mds_nosmt || cpu_mitigations_auto_nosmt()))
                        cpu_smt_disable(false);
        }

        pr_info("%s\n", mds_strings[mds_mitigation]);
}
static int __init mds_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                mds_mitigation = MDS_MITIGATION_OFF;
        else if (!strcmp(str, "full"))
                mds_mitigation = MDS_MITIGATION_FULL;
        else if (!strcmp(str, "full,nosmt")) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_nosmt = true;
        }

        return 0;
}
early_param("mds", mds_cmdline);
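
/*
 * Example usage: booting with "mds=full,nosmt" selects the buffer
 * clearing mitigation and additionally disables SMT, mirroring what
 * cpu_mitigations_auto_nosmt() arranges for "mitigations=auto,nosmt".
 */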
#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
        SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
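
/*
 * For illustration: cmdline_find_option() returns the argument length,
 * so with arg = "retpoline,amd" and ret = 13, match_option(arg, ret,
 * "retpoline") is false (length mismatch) while match_option(arg, ret,
 * "retpoline,amd") is true. The length check prevents prefix matches.
 */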
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};
static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]             = "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]           = "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
        [SPECTRE_V2_USER_PRCTL]            = "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP]          = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
} v2_user_options[] __initconst = {
        { "auto",         SPECTRE_V2_USER_CMD_AUTO,         false },
        { "off",          SPECTRE_V2_USER_CMD_NONE,         false },
        { "on",           SPECTRE_V2_USER_CMD_FORCE,        true  },
        { "prctl",        SPECTRE_V2_USER_CMD_PRCTL,        false },
        { "prctl,ibpb",   SPECTRE_V2_USER_CMD_PRCTL_IBPB,   false },
        { "seccomp",      SPECTRE_V2_USER_CMD_SECCOMP,      false },
        { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
};
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /*
         * At this point, an STIBP mode other than "off" has been set.
         * If STIBP support is not being forced, check if STIBP always-on
         * is preferred.
         */
        if (mode != SPECTRE_V2_USER_STRICT &&
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /* If enhanced IBRS is enabled no STIBP required */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        /*
         * If SMT is not possible or STIBP is not available clear the STIBP
         * mode.
         */
        if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
                mode = SPECTRE_V2_USER_NONE;
set_mode:
        spectre_v2_user = mode;
        /* Only print the STIBP mode when SMT possible */
        if (smt_possible)
                pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]              = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]     = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]     = "Mitigation: Enhanced IBRS",
};

static const struct {
        const char                      *option;
        enum spectre_v2_mitigation_cmd  cmd;
        bool                            secure;
} mitigation_options[] __initconst = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
            cpu_mitigations_off())
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *	- RSB underflow (and switch to BTB) on Skylake+
         *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);
}
static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
         *
         * The other variants cannot be mitigated when SMT is enabled, so
         * clearing the buffers on idle just to prevent the Store Buffer
         * repartitioning leak would be a window dressing exercise.
         */
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;

        if (sched_smt_active())
                static_branch_enable(&mds_idle_clear);
        else
                static_branch_disable(&mds_idle_clear);
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
void arch_smt_update(void)
{
        /* Enhanced IBRS implies STIBP. No update required. */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        mutex_lock(&spec_ctrl_mutex);

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        switch (mds_mitigation) {
        case MDS_MITIGATION_FULL:
        case MDS_MITIGATION_VMWERV:
                if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
                        pr_warn_once(MDS_MSG_SMT);
                update_mds_branch_idle();
                break;
        case MDS_MITIGATION_OFF:
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};
static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]    = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]   = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char              *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
        { "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
            cpu_mitigations_off()) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
         * bit in the mask to allow guests to use the mitigation even in the
         * case where the host does not enable it.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
            static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}
static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE_NOEXEC:
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}
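
/*
 * Sketch of the matching userspace call (error handling omitted); the
 * request reaches this function via arch_prctl_spec_ctrl_set():
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */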
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return 0;
                /*
                 * Indirect branch speculation is always disabled in strict
                 * mode.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
                    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
                        return -EPERM;
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return -EPERM;
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
                    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
                        return 0;
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}
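
/*
 * The indirect branch variant is driven the same way from userspace,
 * e.g. prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 * PR_SPEC_DISABLE, 0, 0) lands here with ctrl == PR_SPEC_DISABLE.
 */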
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_noexec(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}
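
/*
 * A sketch of how userspace decodes the value returned above; the
 * "speculation_on" flag is hypothetical and exists only for this example:
 *
 *	int v = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		      0, 0, 0);
 *	if (v & PR_SPEC_PRCTL)		// per-task control is available
 *		speculation_on = !(v & (PR_SPEC_DISABLE |
 *					PR_SPEC_FORCE_DISABLE));
 */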
static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return PR_SPEC_DISABLE;
        default:
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
}
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}
#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        if (cpu_mitigations_off())
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (cpu_mitigations_auto_nosmt())
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
            e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                        half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]         = "auto",
        [VMENTER_L1D_FLUSH_NEVER]        = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]         = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]       = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif
static ssize_t mds_show_state(char *buf)
{
        if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }

        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                               (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
                                sched_smt_active() ? "mitigated" : "disabled"));
        }

        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return ", STIBP: always-on";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}

static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;

        case X86_BUG_MDS:
                return mds_show_state(buf);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}
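
/*
 * Example of what the spectre_v2 branch above assembles on one possible
 * configuration (retpoline kernel, conditional IBPB and STIBP); the exact
 * string depends on the mitigations selected at boot:
 *
 *	Mitigation: Full generic retpoline, IBPB: conditional, IBRS_FW,
 *	STIBP: conditional, RSB filling
 */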
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif
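
/*
 * These attributes back the files under
 * /sys/devices/system/cpu/vulnerabilities/, so the mitigation state
 * selected above can be inspected from userspace, e.g. with
 * "grep . /sys/devices/system/cpu/vulnerabilities/*".
 */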