KVM: x86/mmu: Remove lpage_is_disallowed() check from set_spte()
author    Sean Christopherson <sean.j.christopherson@intel.com>
          Wed, 8 Jan 2020 20:24:47 +0000 (12:24 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 27 Jan 2020 19:00:09 +0000 (20:00 +0100)
Remove the late "lpage is disallowed" check from set_spte() now that the
initial check is performed after acquiring mmu_lock.  Fold the guts of
the remaining helper, __mmu_gfn_lpage_is_disallowed(), into
kvm_mmu_hugepage_adjust() to eliminate the unnecessary slot !NULL check.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
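
A sketch of the resulting hugepage check, assuming only the shape shown in the
kvm_mmu_hugepage_adjust() hunk below (declarations and the earlier slot
validation are omitted):

    max_level = min(max_level, kvm_x86_ops->get_lpage_level());
    for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
            /* slot was validated earlier, so the old helper's !NULL check is unneeded */
            linfo = lpage_info_slot(gfn, slot, max_level);
            if (!linfo->disallow_lpage)
                    break;
    }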
arch/x86/kvm/mmu/mmu.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 812c69f7f552cf26b77b44f2d9fcafe491710e40..a9e6683c802b5a91150e0ca1d84891e050136d46 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1264,28 +1264,6 @@ static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        list_del(&sp->lpage_disallowed_link);
 }
 
-static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
-                                         struct kvm_memory_slot *slot)
-{
-       struct kvm_lpage_info *linfo;
-
-       if (slot) {
-               linfo = lpage_info_slot(gfn, slot, level);
-               return !!linfo->disallow_lpage;
-       }
-
-       return true;
-}
-
-static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                       int level)
-{
-       struct kvm_memory_slot *slot;
-
-       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
-}
-
 static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
                                          bool no_dirty_log)
 {
@@ -3078,18 +3056,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        spte |= (u64)pfn << PAGE_SHIFT;
 
        if (pte_access & ACC_WRITE_MASK) {
-
-               /*
-                * Legacy code to handle an obsolete scenario where a different
-                * vcpu creates new sp in the window between this vcpu's query
-                * of lpage_is_disallowed() and acquiring mmu_lock.  No longer
-                * necessary now that lpage_is_disallowed() is called after
-                * acquiring mmu_lock.
-                */
-               if (level > PT_PAGE_TABLE_LEVEL &&
-                   mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
-                       goto done;
-
                spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
 
                /*
@@ -3121,7 +3087,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 set_pte:
        if (mmu_spte_update(sptep, spte))
                ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
-done:
        return ret;
 }
 
@@ -3309,6 +3274,7 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
                                   int max_level, kvm_pfn_t *pfnp)
 {
        struct kvm_memory_slot *slot;
+       struct kvm_lpage_info *linfo;
        kvm_pfn_t pfn = *pfnp;
        kvm_pfn_t mask;
        int level;
@@ -3326,7 +3292,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 
        max_level = min(max_level, kvm_x86_ops->get_lpage_level());
        for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
-               if (!__mmu_gfn_lpage_is_disallowed(gfn, max_level, slot))
+               linfo = lpage_info_slot(gfn, slot, max_level);
+               if (!linfo->disallow_lpage)
                        break;
        }