mm/sparse.c: do not waste pre allocated memmap space
diff --git a/mm/sparse.c b/mm/sparse.c
index f6891c1992b181cfd2c48b9b84dcd5c0ee1244ae..8526d3bf1e4ea499c858d5895b0fe6bd23f75e23 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -458,8 +458,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
        if (map)
                return map;
 
-       map = memblock_alloc_try_nid(size,
-                                         PAGE_SIZE, addr,
+       map = memblock_alloc_try_nid_raw(size, size, addr,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!map)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
@@ -482,10 +481,13 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 {
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
        WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
-       sparsemap_buf =
-               memblock_alloc_try_nid_raw(size, PAGE_SIZE,
-                                               addr,
-                                               MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+       /*
+        * Pre-allocated buffer is mainly used by __populate_section_memmap
+        * and we want it to be properly aligned to the section size - this is
+        * especially the case for VMEMMAP which maps memmap to PMDs
+        */
+       sparsemap_buf = memblock_alloc_try_nid_raw(size, section_map_size(),
+                                       addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        sparsemap_buf_end = sparsemap_buf + size;
 }
 
@@ -647,7 +649,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static struct page *populate_section_memmap(unsigned long pfn,
+static struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        return __populate_section_memmap(pfn, nr_pages, nid, altmap);
@@ -669,7 +671,7 @@ static void free_map_bootmem(struct page *memmap)
        vmemmap_free(start, end, NULL);
 }
 #else
-struct page *populate_section_memmap(unsigned long pfn,
+struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        struct page *page, *ret;
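
To see why the stricter alignment matters, below is a minimal user-space sketch of the bump allocation that sparse_buffer_alloc() performs over sparsemap_buf. The names sparsemap_buf, sparsemap_buf_end and sparse_buffer_alloc mirror mm/sparse.c, but this is not kernel code: the ptr_align() helper, the backing[] array and the 2 MiB section size are assumptions made for the demo only. Because each chunk handed out must itself be size-aligned (so VMEMMAP can map it with a PMD), a buffer that is only PAGE_SIZE-aligned can lose up to nearly a whole section before the first chunk:

	/*
	 * Illustrative sketch, not kernel code: mimics the bump allocation
	 * that sparse_buffer_alloc() performs over the pre-allocated buffer.
	 */
	#include <stdio.h>
	#include <stdint.h>

	static char *sparsemap_buf;
	static char *sparsemap_buf_end;

	/* Round ptr up to the next multiple of align (a power of two). */
	static char *ptr_align(char *ptr, unsigned long align)
	{
		return (char *)(((uintptr_t)ptr + align - 1) &
				~(uintptr_t)(align - 1));
	}

	/* Hand out size-aligned, size-sized chunks from the buffer. */
	static void *sparse_buffer_alloc(unsigned long size)
	{
		char *ptr = NULL;

		if (sparsemap_buf) {
			ptr = ptr_align(sparsemap_buf, size);
			if (ptr + size > sparsemap_buf_end)
				ptr = NULL;	/* exhausted: caller falls back */
			else
				sparsemap_buf = ptr + size;
		}
		return ptr;
	}

	int main(void)
	{
		unsigned long section = 2UL << 20;	/* 2 MiB, a typical PMD */
		static char backing[8UL << 20];
		char *first;

		/* Buffer start NOT section-aligned: the pre-patch situation. */
		sparsemap_buf = backing + 4096;
		sparsemap_buf_end = backing + sizeof(backing);
		first = sparse_buffer_alloc(section);
		printf("bytes skipped before first chunk: %lu\n",
		       (unsigned long)(first - (backing + 4096)));
		return 0;
	}

Allocating sparsemap_buf with section_map_size() alignment, as the sparse_buffer_init() hunk above does, makes that initial skip zero, so the pre-allocated memmap space is consumed without waste.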