asedeno.scripts.mit.edu Git - linux.git/commitdiff
mm: pass the vmem_altmap to vmemmap_populate
author: Christoph Hellwig <hch@lst.de>
Fri, 29 Dec 2017 07:53:54 +0000 (08:53 +0100)
committer: Dan Williams <dan.j.williams@intel.com>
Mon, 8 Jan 2018 19:46:23 +0000 (11:46 -0800)
We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
arch/arm64/mm/mmu.c
arch/ia64/mm/discontig.c
arch/powerpc/mm/init_64.c
arch/s390/mm/vmem.c
arch/sparc/mm/init_64.c
arch/x86/mm/init_64.c
include/linux/memory_hotplug.h
include/linux/mm.h
mm/memory_hotplug.c
mm/sparse-vmemmap.c
mm/sparse.c

index 267d2b79d52d6e3918a18a2c590d797bdec6d3cb..ec8952ff13bebe617ee08b4ad583869ec012365f 100644 (file)
@@ -654,12 +654,14 @@ int kern_addr_valid(unsigned long addr)
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
        return vmemmap_populate_basepages(start, end, node);
 }
 #else  /* !ARM64_SWAPPER_USES_SECTION_MAPS */
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
        unsigned long addr = start;
        unsigned long next;
index 9b2d994cddf68cfe4d8417148df393aff76b7f8c..1555aecaaf85b887691466896bb43a48ef0cfd2d 100644 (file)
@@ -754,7 +754,8 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
        return vmemmap_populate_basepages(start, end, node);
 }
index a07722531b32e3dfb329e38460085ef789b0128e..779b74a96b8fd828c82e03df0e066006234100cb 100644 (file)
@@ -183,7 +183,8 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
        vmemmap_list = vmem_back;
 }
 
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
@@ -193,16 +194,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
        for (; start < end; start += page_size) {
-               struct vmem_altmap *altmap;
                void *p;
                int rc;
 
                if (vmemmap_populated(start, page_size))
                        continue;
 
-               /* altmap lookups only work at section boundaries */
-               altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));
-
                p =  __vmemmap_alloc_block_buf(page_size, node, altmap);
                if (!p)
                        return -ENOMEM;
index 3316d463fc2917f984d985f395cd85541cbd9650..c44ef0e7c466c395cabc05b0a77fc408928e4884 100644 (file)
@@ -211,7 +211,8 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
        unsigned long pgt_prot, sgt_prot;
        unsigned long address = start;
index 55ba62957e644116b2e30359f803081ddcd41313..42d27a1a042ad7afcdda3fb524fc6779e8b8ca31 100644 (file)
@@ -2628,7 +2628,7 @@ EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
-                              int node)
+                              int node, struct vmem_altmap *altmap)
 {
        unsigned long pte_base;
 
index e80bb418925442c0eb79399005ce4f130cdf0c3d..594902ef56ef84ee035462f219873e535ed2fa76 100644 (file)
@@ -1411,9 +1411,9 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
        return 0;
 }
 
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap)
 {
-       struct vmem_altmap *altmap = to_vmem_altmap(start);
        int err;
 
        if (boot_cpu_has(X86_FEATURE_PSE))
index db276afbefcc6e96b0a3b23a0f59eb7ea12bea27..cbdd6d52e877d2b3592814780e82256f52a385b6 100644 (file)
@@ -327,7 +327,8 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
+extern int sparse_add_one_section(struct pglist_data *pgdat,
+               unsigned long start_pfn, struct vmem_altmap *altmap);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
index ea818ff739cdfbb433fc10634ed5ac77eacbc5b7..2f3a7ebecbe2f7b4757e8726071fdabacc370460 100644 (file)
@@ -2538,7 +2538,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
                                   unsigned long map_count,
                                   int nodeid);
 
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
+               struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
@@ -2556,7 +2557,8 @@ static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
                               int node);
-int vmemmap_populate(unsigned long start, unsigned long end, int node);
+int vmemmap_populate(unsigned long start, unsigned long end, int node,
+               struct vmem_altmap *altmap);
 void vmemmap_populate_print_last(void);
 #ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end);
index fc0485dcece145a4e3a8aee4a76dd179b57d5fe7..b36f1822c432d28cbfb9908e77a89504931138e0 100644 (file)
@@ -250,7 +250,7 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
 static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
-               bool want_memblock)
+               struct vmem_altmap *altmap, bool want_memblock)
 {
        int ret;
        int i;
@@ -258,7 +258,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
        if (pfn_valid(phys_start_pfn))
                return -EEXIST;
 
-       ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
+       ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
        if (ret < 0)
                return ret;
 
@@ -317,7 +317,8 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
        }
 
        for (i = start_sec; i <= end_sec; i++) {
-               err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
+               err = __add_section(nid, section_nr_to_pfn(i), altmap,
+                               want_memblock);
 
                /*
                 * EEXIST is finally dealt with by ioresource collision
index 17acf01791fa832e1c8414cecc98034f2d652662..376dcf05a39c8c958f0a537230d316b645abdf86 100644 (file)
@@ -278,7 +278,8 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
        return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
+               struct vmem_altmap *altmap)
 {
        unsigned long start;
        unsigned long end;
@@ -288,7 +289,7 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
        start = (unsigned long)map;
        end = (unsigned long)(map + PAGES_PER_SECTION);
 
-       if (vmemmap_populate(start, end, nid))
+       if (vmemmap_populate(start, end, nid, altmap))
                return NULL;
 
        return map;
@@ -318,7 +319,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
                if (!present_section_nr(pnum))
                        continue;
 
-               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
index 7a5dacaa06e3f277c8543c502dba0e3c6a6a1a13..5f4a0dac7836e2e87aef47d6bc23026ee93338e6 100644 (file)
@@ -417,7 +417,8 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
+               struct vmem_altmap *altmap)
 {
        struct page *map;
        unsigned long size;
@@ -472,7 +473,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 
                if (!present_section_nr(pnum))
                        continue;
-               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
@@ -500,7 +501,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);
 
-       map = sparse_mem_map_populate(pnum, nid);
+       map = sparse_mem_map_populate(pnum, nid, NULL);
        if (map)
                return map;
 
@@ -678,10 +679,11 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+               struct vmem_altmap *altmap)
 {
        /* This will make the necessary allocations eventually. */
-       return sparse_mem_map_populate(pnum, nid);
+       return sparse_mem_map_populate(pnum, nid, altmap);
 }
 static void __kfree_section_memmap(struct page *memmap)
 {
@@ -721,7 +723,8 @@ static struct page *__kmalloc_section_memmap(void)
        return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+               struct vmem_altmap *altmap)
 {
        return __kmalloc_section_memmap();
 }
@@ -773,7 +776,8 @@ static void free_map_bootmem(struct page *memmap)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
+int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+               unsigned long start_pfn, struct vmem_altmap *altmap)
 {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
@@ -789,7 +793,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
-       memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
+       memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();