KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation
Author:     David Matlack <dmatlack@google.com>
AuthorDate: Wed, 30 Nov 2016 02:14:08 +0000 (18:14 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 8 Dec 2016 14:31:07 +0000 (15:31 +0100)
KVM emulates MSR_IA32_VMX_CR{0,4}_FIXED1 with the value -1ULL, meaning
all CR0 and CR4 bits are allowed to be 1 during VMX operation.

This does not match real hardware, which disallows the high 32 bits of
CR0 from being 1, and disallows reserved bits of CR4 from being 1
(including bits that are defined in the SDM but reported as unsupported
by CPUID). A guest can induce a VM-entry failure by setting these bits
in GUEST_CR0 and GUEST_CR4, despite MSR_IA32_VMX_CR{0,4}_FIXED1
indicating they are valid.

Since KVM has allowed all bits to be 1 in CR0 and CR4, the existing
checks on these registers do not verify must-be-0 bits. Fix these checks
to identify must-be-0 bits according to MSR_IA32_VMX_CR{0,4}_FIXED1.
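
As an illustration of the FIXED0/FIXED1 semantics, here is a minimal
standalone sketch; the MSR values are hypothetical, chosen only to
exercise the check, and the helper mirrors the fixed_bits_valid()
added by this patch:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  /*
   * A CR value is valid in VMX operation iff every bit set in fixed0
   * is set in val (must-be-1 bits) and every bit clear in fixed1 is
   * clear in val (must-be-0 bits).
   */
  static bool cr_fixed_bits_valid(uint64_t val, uint64_t fixed0,
                                  uint64_t fixed1)
  {
      return ((val & fixed1) | fixed0) == val;
  }

  int main(void)
  {
      /* Hypothetical: PG, NE, PE must be 1; bits 32-63 must be 0. */
      uint64_t fixed0 = 0x80000021ULL;
      uint64_t fixed1 = 0xffffffffULL;

      assert(cr_fixed_bits_valid(0x80000021ULL, fixed0, fixed1));
      /* Clearing a must-be-1 bit (PE, bit 0) fails the check. */
      assert(!cr_fixed_bits_valid(0x80000020ULL, fixed0, fixed1));
      /* Setting a must-be-0 bit (bit 32) fails the check. */
      assert(!cr_fixed_bits_valid(0x180000021ULL, fixed0, fixed1));
      return 0;
  }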

This patch should introduce no change in behavior in KVM, since KVM
still emulates these MSRs as -1ULL.
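
To see why: with fixed1 == -1ULL, (val & fixed1) is just val, so
((val & fixed1) | fixed0) == val reduces to (val | fixed0) == val,
which holds exactly when (val & fixed0) == fixed0, the old
VMXON_CR{0,4}_ALWAYSON-style test being replaced. A quick sketch
checking that equivalence (fixed0 here is a hypothetical stand-in for
VMXON_CR0_ALWAYSON):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  static bool fixed_bits_valid(uint64_t val, uint64_t fixed0,
                               uint64_t fixed1)
  {
      return ((val & fixed1) | fixed0) == val;
  }

  /* The old-style check: all ALWAYSON bits must be set. */
  static bool alwayson_valid(uint64_t val, uint64_t alwayson)
  {
      return (val & alwayson) == alwayson;
  }

  int main(void)
  {
      uint64_t fixed0 = 0x80000021ULL; /* hypothetical ALWAYSON mask */
      uint64_t val;

      /* With fixed1 == -1ULL the two predicates agree for any value. */
      for (val = 0; val < 0x200000000ULL; val += 0x10000001ULL)
          assert(fixed_bits_valid(val, fixed0, -1ULL) ==
                 alwayson_valid(val, fixed0));
      return 0;
  }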

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e0397c60e9861cdc4c9533009c5d7d3fef7c4695..2b8d4f5ac246fa3a38a019c6b714b92b176af0d0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2892,12 +2892,18 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        vmx->nested.nested_vmx_vmcs_enum = 0x2e;
 }
 
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+       return ((val & fixed1) | fixed0) == val;
+}
+
 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
 {
-       /*
-        * Bits 0 in high must be 0, and bits 1 in low must be 1.
-        */
-       return ((control & high) | low) == control;
+       return fixed_bits_valid(control, low, high);
 }
 
 static inline u64 vmx_control_msr(u32 low, u32 high)
@@ -4132,6 +4138,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
                  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+       u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+       if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+               SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+           nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+               fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+       return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+       u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+       return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+       u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+       return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid nested_cr4_valid
+#define nested_host_cr4_valid  nested_cr4_valid
+
 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -4260,8 +4300,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                if (!nested_vmx_allowed(vcpu))
                        return 1;
        }
-       if (to_vmx(vcpu)->nested.vmxon &&
-           ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+       if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
                return 1;
 
        vcpu->arch.cr4 = cr4;
@@ -5826,18 +5866,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
        hypercall[2] = 0xc1;
 }
 
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
-       unsigned long always_on = VMXON_CR0_ALWAYSON;
-       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-       if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
-               SECONDARY_EXEC_UNRESTRICTED_GUEST &&
-           nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
-               always_on &= ~(X86_CR0_PE | X86_CR0_PG);
-       return (val & always_on) == always_on;
-}
-
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -5856,7 +5884,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
                val = (val & ~vmcs12->cr0_guest_host_mask) |
                        (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-               if (!nested_cr0_valid(vcpu, val))
+               if (!nested_guest_cr0_valid(vcpu, val))
                        return 1;
 
                if (kvm_set_cr0(vcpu, val))
@@ -5865,8 +5893,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
                return 0;
        } else {
                if (to_vmx(vcpu)->nested.vmxon &&
-                   ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+                   !nested_host_cr0_valid(vcpu, val))
                        return 1;
+
                return kvm_set_cr0(vcpu, val);
        }
 }
@@ -10325,15 +10354,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                goto out;
        }
 
-       if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-           ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+       if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+           !nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) {
                nested_vmx_failValid(vcpu,
                        VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
                goto out;
        }
 
-       if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-           ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+       if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+           !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
                nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
                goto out;