arm64/sve: KVM: Prevent guests from using SVE
author Dave Martin <Dave.Martin@arm.com>
Tue, 31 Oct 2017 15:51:16 +0000 (15:51 +0000)
committer Will Deacon <will.deacon@arm.com>
Fri, 3 Nov 2017 15:24:19 +0000 (15:24 +0000)
Until KVM has full SVE support, guests must not be allowed to
execute SVE instructions.

This patch enables the necessary traps, and also ensures that the
traps are disabled again on exit from the guest so that the host
can still use SVE if it wants to.
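
For illustration, a minimal sketch of what the trap configuration amounts
to (sve_traps_sketch is a hypothetical name; the real code is split across
the VHE and non-VHE paths in arch/arm64/kvm/hyp/switch.c below):

    /* Sketch only: see __activate_traps_vhe()/__activate_traps_nvhe() below */
    static void sve_traps_sketch(void)
    {
    	u64 val;

    	if (has_vhe()) {
    		/* VHE: clearing CPACR_EL1.ZEN makes guest SVE use trap */
    		val = read_sysreg(cpacr_el1);
    		val &= ~CPACR_EL1_ZEN;
    		write_sysreg(val, cpacr_el1);
    	} else {
    		/* non-VHE: setting CPTR_EL2.TZ traps guest SVE use to EL2 */
    		write_sysreg(CPTR_EL2_DEFAULT | CPTR_EL2_TZ, cptr_el2);
    	}
    }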

On guest exit, high bits of the SVE Zn registers may have been
clobbered as a side-effect of the execution of FPSIMD instructions in
the guest.  The existing KVM host FPSIMD restore code is not
sufficient to restore these bits, so this patch explicitly marks
the CPU as not containing cached vector state for any task, thus
forcing a reload on the next return to userspace.  This is an
interim measure, in advance of adding full SVE awareness to KVM.
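
The underlying invariant, sketched below (cpu_regs_hold_task_state is a
hypothetical helper; the real kernel routes this check through the
TIF_FOREIGN_FPSTATE flag rather than a direct pointer compare):

    /*
     * Sketch: the per-CPU pointer fpsimd_last_state records which task's
     * FP/SIMD state the CPU registers currently hold.  Writing NULL to it
     * means "no task's", so the next return to userspace reloads the
     * registers from the thread structure instead of trusting them.
     */
    static bool cpu_regs_hold_task_state(struct task_struct *tsk)
    {
    	return __this_cpu_read(fpsimd_last_state) ==
    	       &tsk->thread.fpsimd_state;
    }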

This marking of cached vector state in the CPU as invalid is done
using __this_cpu_write(fpsimd_last_state, NULL) in fpsimd.c.  Due
to the repeated use of this rather obscure operation, it makes
sense to factor it out as a separate helper with a clearer name.
This patch factors it out as fpsimd_flush_cpu_state(), and ports
all callers to use it.
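
In practice the refactoring is a drop-in rename at each call site:

    /* before */
    __this_cpu_write(fpsimd_last_state, NULL);

    /* after */
    fpsimd_flush_cpu_state();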

As a side effect of this refactoring, a this_cpu_write() in
fpsimd_cpu_pm_notifier() is changed to __this_cpu_write().  This
should be fine, since cpu_pm_enter() is supposed to be called only
with interrupts disabled.
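
As background (a generic illustration, not part of this patch): the plain
this_cpu_*() accessors are preemption-safe on their own, while the
double-underscore variants assume the caller has already prevented
migration to another CPU, for example by disabling interrupts:

    unsigned long flags;

    /* safe anywhere: handles preemption internally */
    this_cpu_write(fpsimd_last_state, NULL);

    /* caller must pin the CPU first, e.g. with interrupts off */
    local_irq_save(flags);
    __this_cpu_write(fpsimd_last_state, NULL);
    local_irq_restore(flags);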

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kvm/hyp/switch.c
virt/kvm/arm/arm.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 4a879f6ff13bea92d189eec9370be73cd616705c..242151ea69087a4ec8c4b5fd963c210ff30a89fe 100644
@@ -293,4 +293,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 
+/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
+static inline void kvm_fpsimd_flush_cpu_state(void) {}
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b868412c815cfde38daad53111a3465535a750f7..74f34392a5318802c03c5a4751bdc4e63af661ea 100644
@@ -74,6 +74,7 @@ extern void fpsimd_restore_current_state(void);
 extern void fpsimd_update_current_state(struct fpsimd_state *state);
 
 extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void sve_flush_cpu_state(void);
 
 /* Maximum VL that SVE VL-agnostic software can transparently support */
 #define SVE_VL_ARCH_MAX 0x100
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index dbf05370169ab9cafe4b3f2229ac64e55f8c0a21..7f069ff37f06cff3a4e032bc5916be1b155ee1f8 100644
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ    (1 << 8)
-#define CPTR_EL2_DEFAULT       0x000033ff
+#define CPTR_EL2_RES1  0x000032ff /* known RES1 bits in CPTR_EL2 */
+#define CPTR_EL2_DEFAULT       CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TPMS          (1 << 14)
 
 #define CPACR_EL1_FPEN         (3 << 20)
 #define CPACR_EL1_TTA          (1 << 28)
+#define CPACR_EL1_DEFAULT      (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN)
 
 #endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e923b58606e2bf8e33a76bd4a8a9a4c1336199c6..674912d7a571942b956c980aa2096efff32e85f9 100644
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -384,4 +385,14 @@ static inline void __cpu_init_stage2(void)
                  "PARange is %d bits, unsupported configuration!", parange);
 }
 
+/*
+ * All host FP/SIMD state is restored on guest exit, so nothing needs
+ * doing here except in the SVE case:
+ */
+static inline void kvm_fpsimd_flush_cpu_state(void)
+{
+       if (system_supports_sve())
+               sve_flush_cpu_state();
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 88e5e6aef30063d4c425b78eb27df5901b60e418..931fd8dca91a9c8f47e2625f16d3cb65158e28b7 100644
@@ -1050,6 +1050,33 @@ void fpsimd_flush_task_state(struct task_struct *t)
        t->thread.fpsimd_state.cpu = NR_CPUS;
 }
 
+static inline void fpsimd_flush_cpu_state(void)
+{
+       __this_cpu_write(fpsimd_last_state, NULL);
+}
+
+/*
+ * Invalidate any task SVE state currently held in this CPU's regs.
+ *
+ * This is used to prevent the kernel from trying to reuse SVE register data
+ * that is destroyed by KVM guest enter/exit.  This function should go away when
+ * KVM SVE support is implemented.  Don't use it for anything else.
+ */
+#ifdef CONFIG_ARM64_SVE
+void sve_flush_cpu_state(void)
+{
+       struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
+       struct task_struct *tsk;
+
+       if (!fpstate)
+               return;
+
+       tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
+       if (test_tsk_thread_flag(tsk, TIF_SVE))
+               fpsimd_flush_cpu_state();
+}
+#endif /* CONFIG_ARM64_SVE */
+
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 DEFINE_PER_CPU(bool, kernel_neon_busy);
@@ -1090,7 +1117,7 @@ void kernel_neon_begin(void)
        }
 
        /* Invalidate any task state remaining in the fpsimd regs: */
-       __this_cpu_write(fpsimd_last_state, NULL);
+       fpsimd_flush_cpu_state();
 
        preempt_disable();
 
@@ -1211,7 +1238,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
        case CPU_PM_ENTER:
                if (current->mm)
                        task_fpsimd_save();
-               this_cpu_write(fpsimd_last_state, NULL);
+               fpsimd_flush_cpu_state();
                break;
        case CPU_PM_EXIT:
                if (current->mm)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 35a90b8be3da9d4e5a5b698ec511c88c047b91e0..951f3ebaff26e6df8e58a8af1e63548c587e0b4d 100644
@@ -48,7 +48,7 @@ static void __hyp_text __activate_traps_vhe(void)
 
        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
-       val &= ~CPACR_EL1_FPEN;
+       val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
        write_sysreg(val, cpacr_el1);
 
        write_sysreg(__kvm_hyp_vector, vbar_el1);
@@ -59,7 +59,7 @@ static void __hyp_text __activate_traps_nvhe(void)
        u64 val;
 
        val = CPTR_EL2_DEFAULT;
-       val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+       val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
        write_sysreg(val, cptr_el2);
 }
 
@@ -117,7 +117,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
 
        write_sysreg(mdcr_el2, mdcr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
-       write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+       write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
 }
 
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index b9f68e4add71015ea2a9757c4c30a7eed65cfa7b..4d3cf9c82f5bcd4fb8b7184003936ccaaecfaf69 100644
@@ -652,6 +652,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 */
                preempt_disable();
 
+               /* Flush FP/SIMD state that can't survive guest entry/exit */
+               kvm_fpsimd_flush_cpu_state();
+
                kvm_pmu_flush_hwstate(vcpu);
 
                kvm_timer_flush_hwstate(vcpu);