KVM: MMU: check guest CR3 reserved bits based on its physical address width.
author     Yu Zhang <yu.c.zhang@linux.intel.com>
           Thu, 24 Aug 2017 12:27:53 +0000 (20:27 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 24 Aug 2017 16:09:16 +0000 (18:09 +0200)
Currently, KVM uses CR3_L_MODE_RESERVED_BITS to check the
reserved bits in CR3. Yet the number of reserved bits in a
guest's CR3 depends on the physical address width exposed to
the VM. This patch changes the CR3 check logic to calculate
the reserved bits at runtime.
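As a rough illustration (a standalone sketch, not part of the patch): rsvd_bits(s, e)
builds a mask with bits s through e set, so rsvd_bits(maxphyaddr, 62) reserves every
CR3 bit from the guest's MAXPHYADDR up to bit 62 while leaving bit 63 free for
CR3_PCID_INVD; the removed constant instead hard-coded a 40-bit physical address width.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel helper: bits s..e (inclusive) set. */
static uint64_t rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        /* A guest reporting MAXPHYADDR = 36 now has bits 36..62 reserved. */
        printf("%#llx\n", (unsigned long long)rsvd_bits(36, 62)); /* 0x7ffffff000000000 */

        /* The old CR3_L_MODE_RESERVED_BITS covered bits 40..63, which is
         * why the emulator path had to strip bit 63 with ~CR3_PCID_INVD. */
        printf("%#llx\n", (unsigned long long)rsvd_bits(40, 63)); /* 0xffffff0000000000 */
        return 0;
}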

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6db0ed9cf59ebb9ad7838e0d7aa2b031e34c6f55..e7162285b22ee4b72a9876506c87f357858d8995 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -79,7 +79,6 @@
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 
-#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
 #define CR3_PCID_INVD           BIT_64(63)
 #define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 319d91f6f3c46bf45d41bbb10789d263ad0577ef..a89b595dab225c401d542506b016794e78d1e5a0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -28,6 +28,7 @@
 
 #include "x86.h"
 #include "tss.h"
+#include "mmu.h"
 
 /*
  * Operand types
@@ -4097,8 +4098,17 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
                u64 rsvd = 0;
 
                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-               if (efer & EFER_LMA)
-                       rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
+               if (efer & EFER_LMA) {
+                       u64 maxphyaddr;
+                       u32 eax = 0x80000008;
+
+                       if (ctxt->ops->get_cpuid(ctxt, &eax, NULL, NULL,
+                                                NULL, false))
+                               maxphyaddr = eax & 0xff;
+                       else
+                               maxphyaddr = 36;
+                       rsvd = rsvd_bits(maxphyaddr, 62);
+               }
 
                if (new_val & rsvd)
                        return emulate_gp(ctxt, 0);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3ed6192d93b14c97166c980f0cc4d39373cc1e5a..67e7ec2eb7e03ffef67795ac7ef12c6a94a91b78 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -48,6 +48,9 @@
 
 static inline u64 rsvd_bits(int s, int e)
 {
+       if (e < s)
+               return 0;
+
        return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cc2c7e413e9c64054ce48a03d92625efd38b79b8..79f5889f8c129f14af549b07ec509dffc2e982e2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -813,10 +813,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                return 0;
        }
 
-       if (is_long_mode(vcpu)) {
-               if (cr3 & CR3_L_MODE_RESERVED_BITS)
-                       return 1;
-       } else if (is_pae(vcpu) && is_paging(vcpu) &&
+       if (is_long_mode(vcpu) &&
+           (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
+               return 1;
+       else if (is_pae(vcpu) && is_paging(vcpu) &&
                   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                return 1;
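
For a self-contained view of the new behaviour (hypothetical helper names, not KVM
code), both kvm_set_cr3() and check_cr_write() now reduce to the same test: obtain the
guest's MAXPHYADDR (in the emulator from CPUID.80000008H:EAX[7:0], falling back to 36
when that leaf is unavailable) and fault on any CR3 bit in [MAXPHYADDR, 62]:

#include <stdbool.h>
#include <stdint.h>

static uint64_t rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;
        return ((1ULL << (e - s + 1)) - 1) << s;
}

/* Hypothetical stand-in for the check done in kvm_set_cr3() and
 * check_cr_write(): true means the CR3 write should raise #GP(0). */
static bool cr3_write_faults(uint64_t new_cr3, unsigned int maxphyaddr)
{
        /* Bit 63 (CR3_PCID_INVD) is intentionally excluded from the mask. */
        return new_cr3 & rsvd_bits(maxphyaddr, 62);
}

With maxphyaddr = 46, for example, only bits 46..62 are treated as reserved, so a
guest whose page tables sit above 1 TiB (the limit implied by the old 40-bit
assumption) no longer takes a spurious #GP.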