diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4a837289f2add8ac3548e68639f38f845ceee6a9..8b72923f1d35c07c5ded42ae36873790da02d247 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
-       __flush_tlb_one(vaddr);
+       __flush_tlb_one_kernel(vaddr);
 }
 
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
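
The hunk above tracks this kernel's split of the single-address TLB flush into kernel and user variants; __set_pte_vaddr() edits a kernel mapping, so it wants the kernel flavour. A minimal sketch of the calling pattern, assuming this kernel's set_pte() and __flush_tlb_one_kernel() (the wrapper function itself is hypothetical):

/* Hypothetical illustration, not part of the diff. */
static void example_set_kernel_pte(pte_t *ptep, unsigned long vaddr,
				   pte_t new_pte)
{
	set_pte(ptep, new_pte);		/* install the new kernel mapping */
	__flush_tlb_one_kernel(vaddr);	/* flush only this virtual address */
}
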
@@ -772,12 +772,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
        }
 }
 
-int add_pages(int nid, unsigned long start_pfn,
-             unsigned long nr_pages, bool want_memblock)
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+               struct vmem_altmap *altmap, bool want_memblock)
 {
        int ret;
 
-       ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+       ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
        WARN_ON_ONCE(ret);
 
        /* update max_pfn, max_low_pfn and high_memory */
@@ -787,24 +787,24 @@ int add_pages(int nid, unsigned long start_pfn,
        return ret;
 }
 
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+               bool want_memblock)
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
 
        init_memory_mapping(start, start + size);
 
-       return add_pages(nid, start_pfn, nr_pages, want_memblock);
+       return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
-EXPORT_SYMBOL_GPL(arch_add_memory);
 
 #define PAGE_INUSE 0xFD
 
-static void __meminit free_pagetable(struct page *page, int order)
+static void __meminit free_pagetable(struct page *page, int order,
+               struct vmem_altmap *altmap)
 {
        unsigned long magic;
        unsigned int nr_pages = 1 << order;
-       struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
 
        if (altmap) {
                vmem_altmap_free(altmap, nr_pages);
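
With these hunks the altmap is an explicit argument all the way down from arch_add_memory(), instead of being recovered through the removed to_vmem_altmap() lookup. A minimal sketch of a device-memory caller under the new signature (the wrapper and its field choices are hypothetical; struct vmem_altmap's base_pfn/free members, PHYS_PFN(), and want_memblock=false for non-RAM are as in this kernel, and a real caller must keep the altmap alive for the lifetime of the mapping):

/* Hypothetical caller, for illustration only. */
static int example_add_device_memory(int nid, u64 start, u64 size)
{
	struct vmem_altmap altmap = {
		.base_pfn = PHYS_PFN(start),
		.free	  = PHYS_PFN(size),	/* pfns that may back the memmap */
	};

	/* want_memblock=false: device pages stay out of the page allocator */
	return arch_add_memory(nid, start, size, &altmap, false);
}
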
@@ -826,7 +826,8 @@ static void __meminit free_pagetable(struct page *page, int order)
                free_pages((unsigned long)page_address(page), order);
 }
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
+               struct vmem_altmap *altmap)
 {
        pte_t *pte;
        int i;
@@ -838,13 +839,14 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
        }
 
        /* free a pte table */
-       free_pagetable(pmd_page(*pmd), 0);
+       free_pagetable(pmd_page(*pmd), 0, altmap);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
+               struct vmem_altmap *altmap)
 {
        pmd_t *pmd;
        int i;
@@ -856,13 +858,14 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
        }
 
        /* free a pmd table */
-       free_pagetable(pud_page(*pud), 0);
+       free_pagetable(pud_page(*pud), 0, altmap);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
+               struct vmem_altmap *altmap)
 {
        pud_t *pud;
        int i;
@@ -874,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
        }
 
        /* free a pud table */
-       free_pagetable(p4d_page(*p4d), 0);
+       free_pagetable(p4d_page(*p4d), 0, altmap);
        spin_lock(&init_mm.page_table_lock);
        p4d_clear(p4d);
        spin_unlock(&init_mm.page_table_lock);
@@ -882,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-                bool direct)
+                struct vmem_altmap *altmap, bool direct)
 {
        unsigned long next, pages = 0;
        pte_t *pte;
@@ -913,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                         * freed when offlining, or simply not in use.
                         */
                        if (!direct)
-                               free_pagetable(pte_page(*pte), 0);
+                               free_pagetable(pte_page(*pte), 0, altmap);
 
                        spin_lock(&init_mm.page_table_lock);
                        pte_clear(&init_mm, addr, pte);
@@ -936,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
                        page_addr = page_address(pte_page(*pte));
                        if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-                               free_pagetable(pte_page(*pte), 0);
+                               free_pagetable(pte_page(*pte), 0, altmap);
 
                                spin_lock(&init_mm.page_table_lock);
                                pte_clear(&init_mm, addr, pte);
@@ -953,7 +956,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
 static void __meminit
 remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
-                bool direct)
+                bool direct, struct vmem_altmap *altmap)
 {
        unsigned long next, pages = 0;
        pte_t *pte_base;
@@ -972,7 +975,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pmd_page(*pmd),
-                                                      get_order(PMD_SIZE));
+                                                      get_order(PMD_SIZE),
+                                                      altmap);
 
                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
@@ -986,7 +990,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PMD_SIZE)) {
                                        free_pagetable(pmd_page(*pmd),
-                                                      get_order(PMD_SIZE));
+                                                      get_order(PMD_SIZE),
+                                                      altmap);
 
                                        spin_lock(&init_mm.page_table_lock);
                                        pmd_clear(pmd);
@@ -998,8 +1003,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                }
 
                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-               remove_pte_table(pte_base, addr, next, direct);
-               free_pte_table(pte_base, pmd);
+               remove_pte_table(pte_base, addr, next, altmap, direct);
+               free_pte_table(pte_base, pmd, altmap);
        }
 
        /* Call free_pmd_table() in remove_pud_table(). */
@@ -1009,7 +1014,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 
 static void __meminit
 remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
-                bool direct)
+                struct vmem_altmap *altmap, bool direct)
 {
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
@@ -1028,7 +1033,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                            IS_ALIGNED(next, PUD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE));
+                                                      get_order(PUD_SIZE),
+                                                      altmap);
 
                                spin_lock(&init_mm.page_table_lock);
                                pud_clear(pud);
@@ -1042,7 +1048,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PUD_SIZE)) {
                                        free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE));
+                                                      get_order(PUD_SIZE),
+                                                      altmap);
 
                                        spin_lock(&init_mm.page_table_lock);
                                        pud_clear(pud);
@@ -1054,8 +1061,8 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                }
 
                pmd_base = pmd_offset(pud, 0);
-               remove_pmd_table(pmd_base, addr, next, direct);
-               free_pmd_table(pmd_base, pud);
+               remove_pmd_table(pmd_base, addr, next, direct, altmap);
+               free_pmd_table(pmd_base, pud, altmap);
        }
 
        if (direct)
@@ -1064,7 +1071,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 
 static void __meminit
 remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
-                bool direct)
+                struct vmem_altmap *altmap, bool direct)
 {
        unsigned long next, pages = 0;
        pud_t *pud_base;
@@ -1080,14 +1087,14 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                BUILD_BUG_ON(p4d_large(*p4d));
 
                pud_base = pud_offset(p4d, 0);
-               remove_pud_table(pud_base, addr, next, direct);
+               remove_pud_table(pud_base, addr, next, altmap, direct);
                /*
                 * For 4-level page tables we do not want to free PUDs, but in the
                 * 5-level case we should free them. This code will have to change
                 * to adapt for boot-time switching between 4 and 5 level page tables.
                 */
                if (CONFIG_PGTABLE_LEVELS == 5)
-                       free_pud_table(pud_base, p4d);
+                       free_pud_table(pud_base, p4d, altmap);
        }
 
        if (direct)
@@ -1096,7 +1103,8 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 
 /* start and end are both virtual addresses. */
 static void __meminit
-remove_pagetable(unsigned long start, unsigned long end, bool direct)
+remove_pagetable(unsigned long start, unsigned long end, bool direct,
+               struct vmem_altmap *altmap)
 {
        unsigned long next;
        unsigned long addr;
@@ -1111,15 +1119,16 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
                        continue;
 
                p4d = p4d_offset(pgd, 0);
-               remove_p4d_table(p4d, addr, next, direct);
+               remove_p4d_table(p4d, addr, next, altmap, direct);
        }
 
        flush_tlb_all();
 }
 
-void __ref vmemmap_free(unsigned long start, unsigned long end)
+void __ref vmemmap_free(unsigned long start, unsigned long end,
+               struct vmem_altmap *altmap)
 {
-       remove_pagetable(start, end, false);
+       remove_pagetable(start, end, false, altmap);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -1129,24 +1138,22 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
 
-       remove_pagetable(start, end, true);
+       remove_pagetable(start, end, true, NULL);
 }
 
-int __ref arch_remove_memory(u64 start, u64 size)
+int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(start_pfn);
-       struct vmem_altmap *altmap;
        struct zone *zone;
        int ret;
 
        /* With altmap the first mapped page is offset from @start */
-       altmap = to_vmem_altmap((unsigned long) page);
        if (altmap)
                page += vmem_altmap_offset(altmap);
        zone = page_zone(page);
-       ret = __remove_pages(zone, start_pfn, nr_pages);
+       ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
        WARN_ON_ONCE(ret);
        kernel_physical_mapping_remove(start, start + size);
 
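
As the comment in the hunk above notes, an altmap reserves pfns at the base of the range, so the first pfn that actually has a mapped struct page is offset from @start. A small sketch of that arithmetic, assuming this kernel's vmem_altmap_offset(), which returns the reserved pfn count (the helper below is hypothetical):

/* Hypothetical helper, for illustration only. */
static struct page *example_first_mapped_page(unsigned long start_pfn,
		struct vmem_altmap *altmap)
{
	struct page *page = pfn_to_page(start_pfn);

	if (altmap)
		page += vmem_altmap_offset(altmap);	/* skip the reserved pfns */
	return page;	/* e.g. reserve = 2 -> two pages past @start */
}
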
@@ -1186,8 +1193,8 @@ void __init mem_init(void)
        register_page_bootmem_info();
 
        /* Register memory areas for /proc/kcore */
-       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-                        PAGE_SIZE, KCORE_OTHER);
+       if (get_gate_vma(&init_mm))
+               kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
        mem_init_print_info(NULL);
 }
@@ -1378,7 +1385,10 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
                if (pmd_none(*pmd)) {
                        void *p;
 
-                       p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
+                       if (altmap)
+                               p = altmap_alloc_block_buf(PMD_SIZE, altmap);
+                       else
+                               p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (p) {
                                pte_t entry;
 
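
The open-coded branch above replaces the old __vmemmap_alloc_block_buf() wrapper: with an altmap the PMD-sized buffer comes out of the device's reserved pfns via altmap_alloc_block_buf(), otherwise from node-local memory via vmemmap_alloc_block_buf(). Folded into a helper purely for illustration (both allocator calls exist with these signatures in this kernel; the wrapper is hypothetical):

/* Hypothetical wrapper mirroring the branch above. */
static void * __meminit example_vmemmap_buf(int node, struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(PMD_SIZE, altmap);	/* device pfns */
	return vmemmap_alloc_block_buf(PMD_SIZE, node);		/* node-local RAM */
}
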
@@ -1411,9 +1421,9 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
        return 0;
 }
 
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
-       struct vmem_altmap *altmap = to_vmem_altmap(start);
        int err;
 
        if (boot_cpu_has(X86_FEATURE_PSE))
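
With this final hunk, every x86-64 path that can allocate or free the memmap (add_pages(), arch_add_memory(), arch_remove_memory(), vmemmap_populate(), vmemmap_free()) receives the altmap explicitly; passing NULL keeps the old, non-altmap behaviour. A closing sketch of the new vmemmap_populate() entry point (the wrapper is hypothetical; the signature is the one introduced above, and pfn_to_page() yields vmemmap-region addresses on this configuration):

/* Hypothetical illustration of the new entry point. */
static int example_populate_memmap(unsigned long start_pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)pfn_to_page(start_pfn);
	unsigned long end   = (unsigned long)pfn_to_page(start_pfn + nr_pages);

	/* altmap == NULL: struct pages come from regular memory */
	return vmemmap_populate(start, end, nid, altmap);
}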