powerpc/mm: unstub radix__vmemmap_remove_mapping()
author     Reza Arbab <arbab@linux.vnet.ibm.com>
           Mon, 16 Jan 2017 19:07:46 +0000 (13:07 -0600)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 31 Jan 2017 02:54:20 +0000 (13:54 +1100)
Use remove_pagetable() and friends for radix vmemmap removal.

We do not require the special-case handling of vmemmap done in the x86
versions of these functions. This is because vmemmap_free() has already
freed the mapped pages, and calls us with an aligned address range.

So, add a few failsafe WARNs, but otherwise the code to remove physical
mappings is already sufficient for vmemmap.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
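
For orientation (illustration only, not part of the commit): a simplified sketch of the top-level walk that remove_pagetable() performs, with the style of alignment failsafe this patch adds at each level. The function name sketch_remove_pagetable() and the exact structure are illustrative, not verbatim kernel code; the real functions are shown in the hunks below.

static void sketch_remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pgd_t *pgd;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			/*
			 * Failsafe only: vmemmap_free() and
			 * remove_section_mapping() pass aligned ranges,
			 * so this should never fire.
			 */
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			/* Huge leaf at this level: clear it outright. */
			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		/*
		 * Otherwise descend: remove_pud_table(), then
		 * remove_pmd_table(), then remove_pte_table().
		 */
	}
}

With that walk in place, radix__vmemmap_remove_mapping() reduces to a single call to remove_pagetable(start, start + page_size), as the final hunk shows.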
arch/powerpc/mm/pgtable-radix.c

index aef9d49f70ce627d41e91fcf99acbdf6020ef84d..30374586e01d9efa392fa027d15c34e98096f2ce 100644
@@ -527,6 +527,15 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
                if (!pte_present(*pte))
                        continue;
 
+               if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
+                       /*
+                        * The vmemmap_free() and remove_section_mapping()
+                        * codepaths call us with aligned addresses.
+                        */
+                       WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                       continue;
+               }
+
                pte_clear(&init_mm, addr, pte);
        }
 }
@@ -546,6 +555,12 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                        continue;
 
                if (pmd_huge(*pmd)) {
+                       if (!IS_ALIGNED(addr, PMD_SIZE) ||
+                           !IS_ALIGNED(next, PMD_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
                        pte_clear(&init_mm, addr, (pte_t *)pmd);
                        continue;
                }
@@ -571,6 +586,12 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
                        continue;
 
                if (pud_huge(*pud)) {
+                       if (!IS_ALIGNED(addr, PUD_SIZE) ||
+                           !IS_ALIGNED(next, PUD_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
                        pte_clear(&init_mm, addr, (pte_t *)pud);
                        continue;
                }
@@ -597,6 +618,12 @@ static void remove_pagetable(unsigned long start, unsigned long end)
                        continue;
 
                if (pgd_huge(*pgd)) {
+                       if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+                           !IS_ALIGNED(next, PGDIR_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
                        pte_clear(&init_mm, addr, (pte_t *)pgd);
                        continue;
                }
@@ -636,7 +663,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-       /* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+       remove_pagetable(start, start + page_size);
 }
 #endif
 #endif