arm64: add sysfs vulnerability show for speculative store bypass
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9950bb0cbd52167c6b3b76e9d122291d70264df4..4bb0f7cad4189858ec477264a5e1880e1210855d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -109,7 +109,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -131,9 +130,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                     const char *hyp_vecs_start,
-                                     const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                   const char *hyp_vecs_start,
+                                   const char *hyp_vecs_end)
 {
        static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;
@@ -169,7 +168,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start         NULL
 #define __smccc_workaround_1_smc_end           NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
 {
@@ -177,23 +176,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif /* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-                                    bp_hardening_cb_t fn,
-                                    const char *hyp_vecs_start,
-                                    const char *hyp_vecs_end)
-{
-       u64 pfr0;
-
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
-       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-       if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-               return;
-
-       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -220,60 +202,83 @@ static void qcom_link_stack_sanitization(void)
                     : "=&r" (tmp));
 }
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+       __nospectre_v2 = true;
+       return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();
 
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-               return;
+               return -1;
 
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 < 0)
-                       return;
-               cb = call_hvc_arch_workaround_1;
-               /* This is a guest, no need to patch KVM vectors */
-               smccc_start = NULL;
-               smccc_end = NULL;
+               switch ((int)res.a0) {
+               case 1:
+                       /* Firmware says we're just fine */
+                       return 0;
+               case 0:
+                       cb = call_hvc_arch_workaround_1;
+                       /* This is a guest, no need to patch KVM vectors */
+                       smccc_start = NULL;
+                       smccc_end = NULL;
+                       break;
+               default:
+                       return -1;
+               }
                break;
 
        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 < 0)
-                       return;
-               cb = call_smc_arch_workaround_1;
-               smccc_start = __smccc_workaround_1_smc_start;
-               smccc_end = __smccc_workaround_1_smc_end;
+               switch ((int)res.a0) {
+               case 1:
+                       /* Firmware says we're just fine */
+                       return 0;
+               case 0:
+                       cb = call_smc_arch_workaround_1;
+                       smccc_start = __smccc_workaround_1_smc_start;
+                       smccc_end = __smccc_workaround_1_smc_end;
+                       break;
+               default:
+                       return -1;
+               }
                break;
 
        default:
-               return;
+               return -1;
        }
 
        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;
 
-       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+       if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+               install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-       return;
+       return 1;
 }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
        const char      *str;
@@ -343,6 +348,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+               pr_info_once("SSBD disabled by kernel configuration\n");
+               return;
+       }
+
        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
@@ -372,6 +382,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
+       bool this_cpu_safe = false;
 
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
@@ -380,8 +391,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                goto out_printmsg;
        }
 
+       /* delay setting __ssb_safe until we get a firmware response */
+       if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+               this_cpu_safe = true;
+
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
@@ -398,6 +415,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
@@ -406,14 +425,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
 
+       /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;
 
        case SMCCC_RET_SUCCESS:
+               __ssb_safe = false;
                required = true;
                break;
 
@@ -423,6 +446,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
        default:
                WARN_ON(1);
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
@@ -462,7 +487,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
        return required;
 }
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       {},
+};
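
Not part of the patch, for orientation: has_ssbd_mitigation() consults this
whitelist through entry->midr_range_list. A sketch of the matching helpers,
assuming the asm/cputype.h shape of this era (the names are real, the bodies
are paraphrased):

	/* Covers every variant/revision of one part number. */
	#define MIDR_ALL_VERSIONS(m)	MIDR_RANGE(m, 0, 0, 0xf, 0xf)

	/* Walks the list until the empty {} sentinel (.model == 0). */
	static inline bool
	is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
	{
		while (ranges->model)
			if (is_midr_in_range(midr, ranges++))
				return true;
		return false;
	}
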
 
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -507,26 +539,67 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
 
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-       MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
-       {},
+static const struct midr_range spectre_v2_safe_list[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       { /* sentinel */ }
 };
 
-#endif
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       int need_wa;
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       /* If the CPU has CSV2 set, we're safe */
+       if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+                                                ID_AA64PFR0_CSV2_SHIFT))
+               return false;
+
+       /* Alternatively, we have a list of unaffected CPUs */
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+               return false;
+
+       /* Fallback to firmware detection */
+       need_wa = detect_harden_bp_fw();
+       if (!need_wa)
+               return false;
+
+       __spectrev2_safe = false;
+
+       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+               __hardenbp_enab = false;
+               return false;
+       }
+
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+               __hardenbp_enab = false;
+               return false;
+       }
+
+       if (need_wa < 0) {
+               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+               __hardenbp_enab = false;
+       }
+
+       return (need_wa > 0);
+}
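
Not part of the patch, for orientation: this hook is registered below as the
.matches() callback of an ARM64_CPUCAP_LOCAL_CPU_ERRATUM entry, and the
cpufeature core runs such callbacks with SCOPE_LOCAL_CPU on every CPU that
boots, so a single late-onlined core can still demote __spectrev2_safe and
__hardenbp_enab for the whole machine. A simplified sketch of that driver
loop, assuming it condenses the update_cpu_capabilities() path in
arch/arm64/kernel/cpufeature.c (the function name here is hypothetical):

	static void match_local_errata(const struct arm64_cpu_capabilities *caps)
	{
		for (; caps->matches; caps++) {
			if (!caps->matches(caps, SCOPE_LOCAL_CPU))
				continue;
			if (caps->desc)
				pr_info("enabling workaround for %s\n", caps->desc);
			cpus_set_cap(caps->capability);
		}
	}
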
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
@@ -701,13 +774,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
 #endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               .cpu_enable = enable_smccc_arch_workaround_1,
-               ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = check_branch_predictor,
        },
-#endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
        {
                .desc = "EL2 vector hardening",
@@ -715,14 +786,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
        },
 #endif
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
+               .midr_range_list = arm64_ssb_cpus,
        },
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1188873
        {
                /* Cortex-A76 r0p0 to r2p0 */
@@ -742,3 +812,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
        }
 };
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       if (__spectrev2_safe)
+               return sprintf(buf, "Not affected\n");
+
+       if (__hardenbp_enab)
+               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       if (__ssb_safe)
+               return sprintf(buf, "Not affected\n");
+
+       switch (ssbd_state) {
+       case ARM64_SSBD_KERNEL:
+       case ARM64_SSBD_FORCE_ENABLE:
+               if (IS_ENABLED(CONFIG_ARM64_SSBD))
+                       return sprintf(buf,
+                           "Mitigation: Speculative Store Bypass disabled via prctl\n");
+       }
+
+       return sprintf(buf, "Vulnerable\n");
+}
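
Not part of the patch, for orientation: these cpu_show_*() definitions
override the __weak "Not affected" stubs that drivers/base/cpu.c provides
under CONFIG_GENERIC_CPU_VULNERABILITIES, so no extra sysfs plumbing is
needed on the arm64 side. A minimal sketch of the generic half, assuming
the v5.1-era shape of drivers/base/cpu.c:

	/* Weak fallback; an architecture overrides it, as arm64 does above. */
	ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
	{
		return sprintf(buf, "Not affected\n");
	}
	static DEVICE_ATTR(spec_store_bypass, 0444,
			   cpu_show_spec_store_bypass, NULL);

Reading the files under /sys/devices/system/cpu/vulnerabilities/ then
reflects the state tracked above: spectre_v2 reports "Not affected" for
CSV2 or whitelisted cores, "Mitigation: Branch predictor hardening" when
every booted core installed the workaround, and "Vulnerable" otherwise,
while spec_store_bypass reports the prctl mitigation only when
CONFIG_ARM64_SSBD is enabled and ssbd_state is ARM64_SSBD_KERNEL or
ARM64_SSBD_FORCE_ENABLE.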