KVM: arm64: Safety check PSTATE when entering guest and handle IL
author    Christoffer Dall <christoffer.dall@arm.com>
          Wed, 17 Oct 2018 18:21:16 +0000 (20:21 +0200)
committer Marc Zyngier <marc.zyngier@arm.com>
          Fri, 19 Oct 2018 10:13:03 +0000 (11:13 +0100)
This commit adds a paranoid check when entering the guest to make sure
we don't attempt to run guest code in an equally or more privileged mode
than the hypervisor.  We also catch other accidental programming of
SPSR_EL2 that results in an illegal exception return and report this
safely back to the user.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/ptrace.h
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/sysreg-sr.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 0b53c72e759101eeb5f23d25f9f79940f81dc2ba..aea01a09eb9474a8112adfc14a7ad22e743aa77b 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -30,6 +30,7 @@
 #define ARM_EXCEPTION_IRQ        0
 #define ARM_EXCEPTION_EL1_SERROR  1
 #define ARM_EXCEPTION_TRAP       2
+#define ARM_EXCEPTION_IL         3
 /* The hyp-stub will return this for any kvm_call_hyp() call */
 #define ARM_EXCEPTION_HYP_GONE   HVC_STUB_ERR
 
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 177b851ca6d997741580e73c89e448f274ac3ba7..ff35ac1258eb7b495c900e52f7da9a49993a2a1d 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -25,6 +25,9 @@
 #define CurrentEL_EL1          (1 << 2)
 #define CurrentEL_EL2          (2 << 2)
 
+/* Additional SPSR bits not exposed in the UABI */
+#define PSR_IL_BIT             (1 << 20)
+
 /* AArch32-specific ptrace requests */
 #define COMPAT_PTRACE_GETREGS          12
 #define COMPAT_PTRACE_SETREGS          13
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e5e741bfffe19140d3551334a6b425bf4c2b7672..35a81bebd02bcf30738b3121fc980e663097c17d 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -284,6 +284,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 */
                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                return 0;
+       case ARM_EXCEPTION_IL:
+               /*
+                * We attempted an illegal exception return.  Guest state must
+                * have been corrupted somehow.  Give up.
+                */
+               run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               return -EINVAL;
        default:
                kvm_pr_unimpl("Unsupported exception type: %d",
                              exception_index);
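
As a rough illustration of how this new exit reaches userspace: handle_exit() sets KVM_EXIT_FAIL_ENTRY and returns -EINVAL, so the VMM's KVM_RUN ioctl fails with EINVAL while the run structure records the failed entry. The sketch below is a hypothetical VMM-side check, not code from this patch; the vcpu_fd and the mmap'd kvm_run pointer are assumed to already exist, and the helper name is made up.

/*
 * Hypothetical VMM-side check: vcpu_fd is an open KVM vCPU fd and run
 * points at its mmap'd struct kvm_run.  With this patch an illegal
 * exception return shows up as KVM_EXIT_FAIL_ENTRY with KVM_RUN
 * returning -1/EINVAL.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	if (ret < 0 && run->exit_reason == KVM_EXIT_FAIL_ENTRY) {
		/* Guest state is corrupted; give up on this vCPU. */
		fprintf(stderr, "vcpu entry failed: %s\n", strerror(errno));
		return -1;
	}
	return ret;
}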
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4ac9f9c30aaa2da04c16a798bf9ff..b1f14f736962f938911088f63d1f833f9a7c2c81 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -162,6 +162,20 @@ el1_error:
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit
 
+el2_sync:
+       /* Check for illegal exception return, otherwise panic */
+       mrs     x0, spsr_el2
+
+       /* if this was something else, then panic! */
+       tst     x0, #PSR_IL_BIT
+       b.eq    __hyp_panic
+
+       /* Let's attempt a recovery from the illegal exception return */
+       get_vcpu_ptr    x1, x0
+       mov     x0, #ARM_EXCEPTION_IL
+       b       __guest_exit
+
+
 el2_error:
        ldp     x0, x1, [sp], #16
 
@@ -240,7 +254,7 @@ ENTRY(__kvm_hyp_vector)
        invalid_vect    el2t_fiq_invalid        // FIQ EL2t
        invalid_vect    el2t_error_invalid      // Error EL2t
 
-       invalid_vect    el2h_sync_invalid       // Synchronous EL2h
+       valid_vect      el2_sync                // Synchronous EL2h
        invalid_vect    el2h_irq_invalid        // IRQ EL2h
        invalid_vect    el2h_fiq_invalid        // FIQ EL2h
        valid_vect      el2_error               // Error EL2h
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 9ce223944983b803e3b4161d57a5ff6e17e8ec5b..8dc28531820414061040c657e6066815d248e589 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 static void __hyp_text
 __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
+       u64 pstate = ctxt->gp_regs.regs.pstate;
+       u64 mode = pstate & PSR_AA32_MODE_MASK;
+
+       /*
+        * Safety check to ensure we're setting the CPU up to enter the guest
+        * in a less privileged mode.
+        *
+        * If we are attempting a return to EL2 or higher in AArch64 state,
+        * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
+        * we'll take an illegal exception state exception immediately after
+        * the ERET to the guest.  Attempts to return to AArch32 Hyp will
+        * result in an illegal exception return because EL2's execution state
+        * is determined by SCR_EL3.RW.
+        */
+       if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
+               pstate = PSR_MODE_EL2h | PSR_IL_BIT;
+
        write_sysreg_el2(ctxt->gp_regs.regs.pc,         elr);
-       write_sysreg_el2(ctxt->gp_regs.regs.pstate,     spsr);
+       write_sysreg_el2(pstate,                        spsr);
 
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
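
To see what the check above does to concrete PSTATE values, here is a minimal standalone sketch of the same predicate. The PSR_* encodings mirror the arm64 ptrace headers; sanitise_entry_pstate() is a made-up name used only for this illustration, not a function from the patch.

/*
 * Minimal sketch of the PSTATE guard added to
 * __sysreg_restore_el2_return_state() above.
 */
#include <stdint.h>
#include <stdio.h>

#define PSR_MODE_EL1h		0x00000005UL
#define PSR_MODE_EL2t		0x00000008UL
#define PSR_MODE_EL2h		0x00000009UL
#define PSR_MODE32_BIT		0x00000010UL
#define PSR_AA32_MODE_MASK	0x0000001fUL
#define PSR_IL_BIT		(1UL << 20)

static uint64_t sanitise_entry_pstate(uint64_t pstate)
{
	uint64_t mode = pstate & PSR_AA32_MODE_MASK;

	/* AArch64 EL2 or higher: force an illegal exception return instead. */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		return PSR_MODE_EL2h | PSR_IL_BIT;

	return pstate;
}

int main(void)
{
	/* A normal EL1h guest PSTATE passes through unchanged... */
	printf("EL1h -> %#llx\n",
	       (unsigned long long)sanitise_entry_pstate(PSR_MODE_EL1h));
	/* ...while an (invalid) EL2h one is rewritten to EL2h with IL set. */
	printf("EL2h -> %#llx\n",
	       (unsigned long long)sanitise_entry_pstate(PSR_MODE_EL2h));
	return 0;
}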