asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: MMU: add missing reserved bits check in speculative path
author: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Fri, 16 Jul 2010 03:19:51 +0000 (11:19 +0800)
committer: Avi Kivity <avi@redhat.com>
Mon, 2 Aug 2010 03:40:56 +0000 (06:40 +0300)
In the speculative path, we should check the guest pte's reserved bits just as
the real processor does.

Reported-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index 812770cddc8d5456f59056996531357cf9592c4f..d2ea9cabc066a2b4b2af7358fde2a105cca966f2 100644 (file)
@@ -2697,6 +2697,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                return;
         }
 
+       if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
+               return;
+
        ++vcpu->kvm->stat.mmu_pte_updated;
        if (!sp->role.cr4_pae)
                paging32_update_pte(vcpu, sp, spte, new);
@@ -2775,6 +2778,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       bool guest_initiated)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
+       union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
@@ -2849,6 +2853,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                }
        }
 
+       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
                pte_size = sp->role.cr4_pae ? 8 : 4;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -2896,7 +2901,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                while (npte--) {
                        entry = *spte;
                        mmu_pte_write_zap_pte(vcpu, sp, spte);
-                       if (gentry)
+                       if (gentry &&
+                             !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+                             & mask.word))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
index d9a2742014e3d706a82ee7e57ab124f7f866f8d2..51ef9097960d43624b462f6f0a698553b292f362 100644 (file)
@@ -638,8 +638,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                        return -EINVAL;
 
                gfn = gpte_to_gfn(gpte);
-               if (gfn != sp->gfns[i] ||
-                     !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
+               if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL)
+                     || gfn != sp->gfns[i] || !is_present_gpte(gpte)
+                     || !(gpte & PT_ACCESSED_MASK)) {
                        u64 nonpresent;
 
                        if (is_present_gpte(gpte) || !clear_unsync)