KVM: x86/mmu: Use fast invalidate mechanism to zap MMIO sptes
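
A short reading of the change: MMIO sptes used to be zapped selectively on memslot generation wraparound by tagging the shadow pages that contain them (the mmio_cached flag removed below) and walking the whole active list. With this patch the wraparound path instead reuses the fast invalidate mechanism, which is assumed to work by bumping the per-VM mmu_valid_gen so every existing shadow page becomes obsolete and is reclaimed lazily rather than zapped synchronously. A minimal sketch of that idea, not the in-tree body of kvm_mmu_zap_all_fast():

static void fast_invalidate_sketch(struct kvm *kvm)
{
        spin_lock(&kvm->mmu_lock);
        /*
         * Assumed behaviour: changing the generation makes
         * is_obsolete_sp() true for every existing shadow page, so
         * for_each_valid_sp() skips them and they can be zapped in
         * batches (or lazily) after the lock is dropped.
         */
        kvm->arch.mmu_valid_gen++;
        spin_unlock(&kvm->mmu_lock);
}
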
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a10af9c87f8ac1a2b42bf5a6f182840bd2fce276..6319f1e208f6da8bb10d5171fe9e2d546ffbde89 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -403,8 +403,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
        mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << shadow_nonpresent_or_rsvd_mask_len;
 
-       page_header(__pa(sptep))->mmio_cached = true;
-
        trace_mark_mmio_spte(sptep, gfn, access, gen);
        mmu_spte_set(sptep, mask);
 }
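
Dropping mmio_cached removes only per-shadow-page bookkeeping; the MMIO encoding inside the spte itself is untouched. For orientation, the spte built here packs the MMIO marker, the access bits, the gfn and the current generation into a non-present entry, roughly along these lines (bit layout illustrative; generation_bits() is a hypothetical stand-in for the real mask helpers):

static u64 mmio_spte_layout_sketch(u64 gfn, unsigned int access, u64 gen)
{
        /* illustrative composition, not the exact in-tree masking */
        return shadow_mmio_value |       /* identifies the spte as MMIO      */
               access |                  /* ACC_WRITE_MASK / ACC_USER_MASK   */
               (gfn << PAGE_SHIFT) |     /* guest physical address           */
               generation_bits(gen);     /* hypothetical helper for gen bits */
}
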
@@ -2252,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-               if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
+               if (is_obsolete_sp((_kvm), (_sp))) {                    \
                } else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                        \
@@ -2311,7 +2309,8 @@ static void mmu_audit_disable(void) { }
 
 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+       return sp->role.invalid ||
+              unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
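
With role.invalid folded into is_obsolete_sp(), callers such as for_each_valid_sp() need only one predicate to decide whether a shadow page may still be used. Effectively the combined test behaves like the sketch below (an illustration of the new semantics, not added code):

static bool sp_is_usable_sketch(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        /* usable = not marked invalid and created in the current generation */
        return !sp->role.invalid &&
               sp->mmu_valid_gen == kvm->arch.mmu_valid_gen;
}
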
@@ -5383,7 +5382,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                       void *insn, int insn_len)
 {
        int r, emulation_type = 0;
-       enum emulation_result er;
        bool direct = vcpu->arch.mmu->direct_map;
 
        /* With shadow page tables, fault_address contains a GVA or nGPA.  */
@@ -5450,19 +5448,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                        return 1;
        }
 
-       er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
-
-       switch (er) {
-       case EMULATE_DONE:
-               return 1;
-       case EMULATE_USER_EXIT:
-               ++vcpu->stat.mmio_exits;
-               /* fall through */
-       case EMULATE_FAIL:
-               return 0;
-       default:
-               BUG();
-       }
+       return x86_emulate_instruction(vcpu, cr2, emulation_type, insn,
+                                      insn_len);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
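
Returning x86_emulate_instruction() directly assumes its return value already follows the convention kvm_mmu_page_fault() callers expect: 1 to resume the guest, 0 to exit to userspace (with the mmio_exits accounting presumably handled elsewhere after this cleanup), and a negative value on error. A hedged sketch of how a caller is assumed to consume the result (the real callers live in the vmx/svm exit handlers):

static int page_fault_exit_sketch(struct kvm_vcpu *vcpu, gva_t cr2,
                                  u64 error_code)
{
        int r = kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);

        if (r < 0)
                return r;       /* internal error, propagate to the run loop */
        if (r == 0)
                return 0;       /* exit to userspace (e.g. MMIO, emulation failure) */
        return 1;               /* fault fixed or instruction emulated, re-enter guest */
}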
 
@@ -5959,7 +5946,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
@@ -5968,14 +5955,10 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
        spin_lock(&kvm->mmu_lock);
 restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-               if (mmio_only && !sp->mmio_cached)
-                       continue;
                if (sp->role.invalid && sp->root_count)
                        continue;
-               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-                       WARN_ON_ONCE(mmio_only);
+               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
                        goto restart;
-               }
                if (cond_resched_lock(&kvm->mmu_lock))
                        goto restart;
        }
@@ -5984,11 +5967,6 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
        spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_mmu_zap_all(struct kvm *kvm)
-{
-       return __kvm_mmu_zap_all(kvm, false);
-}
-
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
        WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
@@ -6010,7 +5988,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
         */
        if (unlikely(gen == 0)) {
                kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-               __kvm_mmu_zap_all(kvm, true);
+               kvm_mmu_zap_all_fast(kvm);
        }
 }
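
The wraparound case exists because an MMIO spte caches the memslots generation it was created under; a generation mismatch on the fault path normally forces the spte to be re-created, but once the counter wraps back to a previously used value a stale spte could be mistaken for a fresh one, so all shadow pages are invalidated instead. A sketch of the staleness check this protects, assuming the decode helper get_mmio_spte_generation() in this file and using mmio_spte_gen_of() as a hypothetical stand-in for extracting the current generation bits:

static bool mmio_spte_is_stale_sketch(struct kvm_vcpu *vcpu, u64 spte)
{
        /*
         * mmio_spte_gen_of() is a hypothetical helper: it would truncate
         * the memslots generation to the bits an MMIO spte can store.
         */
        u64 cur_gen = mmio_spte_gen_of(kvm_vcpu_memslots(vcpu)->generation);

        return get_mmio_spte_generation(spte) != cur_gen;
}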