asedeno.scripts.mit.edu Git - linux.git/blobdiff - mm/sparse-vmemmap.c
mm/sparse: abstract sparse buffer allocations
[linux.git] / mm / sparse-vmemmap.c
index 95e2c7638a5cc7563bea07f7e4309cf42998ace9..b05c7663c6401d4aa4841a853d12cf7cd8147000 100644 (file)
@@ -43,12 +43,9 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long goal)
 {
        return memblock_virt_alloc_try_nid_raw(size, align, goal,
-                                           BOOTMEM_ALLOC_ACCESSIBLE, node);
+                                              BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
-static void *vmemmap_buf;
-static void *vmemmap_buf_end;
-
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
        /* If the main allocator is up use that, fallback to bootmem. */
@@ -76,18 +73,10 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 /* need to make sure size is all the same during early stage */
 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
-       void *ptr;
-
-       if (!vmemmap_buf)
-               return vmemmap_alloc_block(size, node);
-
-       /* take the from buf */
-       ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
-       if (ptr + size > vmemmap_buf_end)
-               return vmemmap_alloc_block(size, node);
-
-       vmemmap_buf = ptr + size;
+       void *ptr = sparse_buffer_alloc(size);
 
+       if (!ptr)
+               ptr = vmemmap_alloc_block(size, node);
        return ptr;
 }
 
@@ -279,19 +268,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long map_count, int nodeid)
 {
        unsigned long pnum;
-       unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
-       void *vmemmap_buf_start;
        int nr_consumed_maps = 0;
 
-       size = ALIGN(size, PMD_SIZE);
-       vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
-                        PMD_SIZE, __pa(MAX_DMA_ADDRESS));
-
-       if (vmemmap_buf_start) {
-               vmemmap_buf = vmemmap_buf_start;
-               vmemmap_buf_end = vmemmap_buf_start + size * map_count;
-       }
-
+       sparse_buffer_init(section_map_size() * map_count, nodeid);
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
@@ -303,12 +282,5 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
        }
-
-       if (vmemmap_buf_start) {
-               /* need to free left buf */
-               memblock_free_early(__pa(vmemmap_buf),
-                                   vmemmap_buf_end - vmemmap_buf);
-               vmemmap_buf = NULL;
-               vmemmap_buf_end = NULL;
-       }
+       sparse_buffer_fini();
 }