iommu/amd: Unmap all L7 PTEs when downgrading page-sizes
author    Andrei Dulea <adulea@amazon.de>
          Fri, 13 Sep 2019 14:42:31 +0000 (16:42 +0200)
committer Joerg Roedel <jroedel@suse.de>
          Tue, 24 Sep 2019 09:15:51 +0000 (11:15 +0200)
When replacing a large mapping created with page-mode 7 (i.e.
non-default page size), tear down the entire series of replicated PTEs.
Besides leaving the old mapping accessible, this issue can also make the
fetch_pte() code path return a PDE entry of the newly re-mapped range.
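
For context, a page-mode-7 mapping represents a non-default page size by
replicating the same PTE across every 8-byte slot the large page spans at
that level, so overwriting a single slot leaves the remaining copies (and
with them the old mapping) live. A minimal, self-contained sketch of how
the replica count relates to the page size (the formula mirrors the
driver's PAGE_SIZE_PTE_COUNT() macro as I understand it; treat it as an
assumption, not the literal kernel code):

#include <stdio.h>

/* Sketch only: each page-table level resolves 9 address bits on top of
 * the 4 KiB (12-bit) base page, so a power-of-two page size is backed by
 * 2^((log2(size) - 12) mod 9) identical PTEs at the level covering it. */
static unsigned long replicated_pte_count(unsigned long page_size)
{
	return 1UL << ((__builtin_ctzl(page_size) - 12) % 9);
}

int main(void)
{
	printf("32K -> %lu PTEs\n", replicated_pte_count(32UL << 10)); /* 8   */
	printf("1M  -> %lu PTEs\n", replicated_pte_count(1UL << 20));  /* 256 */
	printf("2M  -> %lu PTEs\n", replicated_pte_count(2UL << 20));  /* 1   */
	return 0;
}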

While at it, make sure that we flush the TLB in case alloc_pte() fails
and returns NULL at a lower level.
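
The error-path flush matters because alloc_pte() can tear down the
replicated PTEs (marking the domain as updated) and only afterwards fail
to allocate a deeper table. A small self-contained model of that ordering
(hypothetical names, purely illustrative, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct domain { bool updated; };

/* Stands in for the real IOTLB flush; a no-op when nothing was changed. */
static void flush_if_updated(struct domain *dom)
{
	if (!dom->updated)
		return;
	printf("flushing IOTLB\n");
	dom->updated = false;
}

/* Models alloc_pte(): the old large PTEs are cleared first, then the
 * allocation of a lower-level table may still fail. */
static void *alloc_pte_model(struct domain *dom, bool out_of_memory)
{
	dom->updated = true;               /* replicated PTEs already torn down */
	return out_of_memory ? NULL : dom; /* NULL means allocation failed */
}

int main(void)
{
	struct domain dom = { .updated = false };

	if (!alloc_pte_model(&dom, true)) {
		flush_if_updated(&dom);    /* must flush even though we failed */
		return 1;
	}
	return 0;
}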

Fixes: 6d568ef9a622 ("iommu/amd: Allow downgrading page-sizes in alloc_pte()")
Signed-off-by: Andrei Dulea <adulea@amazon.de>
drivers/iommu/amd_iommu.c

index a227e7a9b8b7b5dbfcfcaae9dce1ee9ef79393f1..fda9923542c90e1377b738ef3209ac33e67509c6 100644
@@ -1512,10 +1512,32 @@ static u64 *alloc_pte(struct protection_domain *domain,
                __pte     = *pte;
                pte_level = PM_PTE_LEVEL(__pte);
 
-               if (!IOMMU_PTE_PRESENT(__pte) ||
-                   pte_level == PAGE_MODE_NONE ||
+               /*
+                * If we replace a series of large PTEs, we need
+                * to tear down all of them.
+                */
+               if (IOMMU_PTE_PRESENT(__pte) &&
                    pte_level == PAGE_MODE_7_LEVEL) {
+                       unsigned long count, i;
+                       u64 *lpte;
+
+                       lpte = first_pte_l7(pte, NULL, &count);
+
+                       /*
+                        * Unmap the replicated PTEs that still match the
+                        * original large mapping
+                        */
+                       for (i = 0; i < count; ++i)
+                               cmpxchg64(&lpte[i], __pte, 0ULL);
+
+                       domain->updated = true;
+                       continue;
+               }
+
+               if (!IOMMU_PTE_PRESENT(__pte) ||
+                   pte_level == PAGE_MODE_NONE) {
                        page = (u64 *)get_zeroed_page(gfp);
+
                        if (!page)
                                return NULL;
 
@@ -1646,8 +1668,10 @@ static int iommu_map_page(struct protection_domain *dom,
        count = PAGE_SIZE_PTE_COUNT(page_size);
        pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
 
-       if (!pte)
+       if (!pte) {
+               update_domain(dom);
                return -ENOMEM;
+       }
 
        for (i = 0; i < count; ++i)
                freelist = free_clear_pte(&pte[i], pte[i], freelist);
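
For reference, the teardown loop added above relies on first_pte_l7() to
locate the first of the replicated PTEs and their count. A self-contained
sketch of the pointer arithmetic such a helper performs (the real helper
lives in drivers/iommu/amd_iommu.c; the mask computation and the names
below are reconstructions, treat them as assumptions):

#include <stdint.h>
#include <stdio.h>

/* Given a pointer to any one of the replicated PTEs and the number of
 * replicas, mask the address down to the first slot of the group.  The
 * group is naturally aligned because the IOVA is aligned to the page
 * size, so clearing the low address bits is enough. */
static uint64_t *first_replicated_pte(uint64_t *pte, unsigned long count)
{
	unsigned long mask = ~((count << 3) - 1);   /* count slots of 8 bytes */

	return (uint64_t *)((unsigned long)pte & mask);
}

int main(void)
{
	/* A level-1 table of 512 PTEs; pretend slots 8..15 hold one 32 KiB
	 * page-mode-7 mapping, i.e. 8 replicated entries. */
	static uint64_t table[512] __attribute__((aligned(4096)));
	uint64_t *somewhere_inside = &table[13];
	uint64_t *first = first_replicated_pte(somewhere_inside, 8);

	printf("first replica at index %ld\n", (long)(first - table)); /* 8 */
	return 0;
}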