mm/sparse-vmemmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
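/*
 * As a rough illustration (not part of this file): on an architecture with
 * a virtually contiguous memmap, such as x86-64, the conversions reduce to
 * pointer arithmetic against the vmemmap base address, roughly:
 *
 *      #define vmemmap                 ((struct page *)VMEMMAP_START)
 *      #define __pfn_to_page(pfn)      (vmemmap + (pfn))
 *      #define __page_to_pfn(page)     ((unsigned long)((page) - vmemmap))
 *
 * The helpers in this file only make sure that the pages backing that
 * struct page array, and the page tables mapping them, actually exist.
 */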
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long size,
                                unsigned long align,
                                unsigned long goal)
{
        return memblock_virt_alloc_try_nid_raw(size, align, goal,
                                            BOOTMEM_ALLOC_ACCESSIBLE, node);
}

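/*
 * Optional preallocated buffer for the early (bootmem) case.  It is set up
 * per node by sparse_mem_maps_populate_node() and consumed piecewise by
 * vmemmap_alloc_block_buf() below.
 */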
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
        /* If the main allocator is up, use that; otherwise fall back to bootmem. */
        if (slab_is_available()) {
                gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
                int order = get_order(size);
                static bool warned;
                struct page *page;

                page = alloc_pages_node(node, gfp_mask, order);
                if (page)
                        return page_address(page);

                if (!warned) {
                        warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
                                   "vmemmap alloc failure: order:%u", order);
                        warned = true;
                }
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,
                                __pa(MAX_DMA_ADDRESS));
}

/* Callers must use the same size for all allocations during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
        void *ptr;

        if (!vmemmap_buf)
                return vmemmap_alloc_block(size, node);

        /* take the allocation from the preallocated buffer */
        ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
        if (ptr + size > vmemmap_buf_end)
                return vmemmap_alloc_block(size, node);

        vmemmap_buf = ptr + size;

        return ptr;
}

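/*
 * A vmem_altmap (struct vmem_altmap, linux/memremap.h, included above)
 * describes a device-provided reservation of pages that backs the struct
 * pages for that device instead of regular system memory.  The helper below
 * returns the first pfn of that reservation that has not yet been handed
 * out.
 */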
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
        return altmap->base_pfn + altmap->reserve + altmap->alloc
                + altmap->align;
}

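/* Number of pfns in the altmap reservation that are still available */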
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
        unsigned long allocated = altmap->alloc + altmap->align;

        if (altmap->free > allocated)
                return altmap->free - allocated;
        return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
                unsigned long nr_pfns)
{
        unsigned long pfn = vmem_altmap_next_pfn(altmap);
        unsigned long nr_align;

        nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
        nr_align = ALIGN(pfn, nr_align) - pfn;

        if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
                return ULONG_MAX;
        altmap->alloc += nr_pfns;
        altmap->align += nr_align;
        return pfn + nr_align;
}

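/*
 * Allocate @size bytes (a multiple of PAGE_SIZE) of vmemmap backing from the
 * altmap reservation instead of from regular system memory.  Returns NULL if
 * the reservation cannot satisfy the request.
 */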
void * __meminit altmap_alloc_block_buf(unsigned long size,
                struct vmem_altmap *altmap)
{
        unsigned long pfn, nr_pfns;
        void *ptr;

        if (size & ~PAGE_MASK) {
                pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
                                __func__, size);
                return NULL;
        }

        nr_pfns = size >> PAGE_SHIFT;
        pfn = vmem_altmap_alloc(altmap, nr_pfns);
        if (pfn < ULONG_MAX)
                ptr = __va(__pfn_to_phys(pfn));
        else
                ptr = NULL;
        pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
                        __func__, pfn, altmap->alloc, altmap->align, nr_pfns);

        return ptr;
}

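/*
 * Warn when the page backing this range of struct pages was allocated on a
 * NUMA node that is not local to @node.
 */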
void __meminit vmemmap_verify(pte_t *pte, int node,
                                unsigned long start, unsigned long end)
{
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);

        if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                pr_warn("[%lx-%lx] potential offnode page_structs\n",
                        start, end - 1);
}

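/*
 * Populate the PTE for @addr in the vmemmap, allocating one backing page if
 * the entry is still empty.  Returns the PTE, or NULL on allocation failure.
 */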
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte)) {
                pte_t entry;
                void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        }
        return pte;
}

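/* Like vmemmap_alloc_block(), but zero the block for use as a page table. */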
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
        void *p = vmemmap_alloc_block(size, node);

        if (!p)
                return NULL;
        memset(p, 0, size);

        return p;
}

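/*
 * The helpers below each walk one level of the kernel page table: if the
 * entry for @addr is empty they allocate a zeroed page-table page and hook
 * it up, then return a pointer to the (now valid) entry, or NULL on
 * allocation failure.
 */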
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pmd_populate_kernel(&init_mm, pmd, p);
        }
        return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
        pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pud_populate(&init_mm, pud, p);
        }
        return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                p4d_populate(&init_mm, p4d, p);
        }
        return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
        pgd_t *pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p);
        }
        return pgd;
}

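/*
 * Generic way to populate a vmemmap range with base (PAGE_SIZE) pages;
 * architectures that do not map the vmemmap with huge pages can use this
 * from their vmemmap_populate() implementation.
 */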
int __meminit vmemmap_populate_basepages(unsigned long start,
                                         unsigned long end, int node)
{
        unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (; addr < end; addr += PAGE_SIZE) {
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
                p4d = vmemmap_p4d_populate(pgd, addr, node);
                if (!p4d)
                        return -ENOMEM;
                pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_populate(pud, addr, node);
                if (!pmd)
                        return -ENOMEM;
                pte = vmemmap_pte_populate(pmd, addr, node);
                if (!pte)
                        return -ENOMEM;
                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }

        return 0;
}

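/*
 * Build the memory map for one sparsemem section by populating the vmemmap
 * range covering its struct pages.  Returns the first struct page of the
 * section, or NULL if the architecture's vmemmap_populate() failed.
 */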
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
{
        unsigned long start;
        unsigned long end;
        struct page *map;

        map = pfn_to_page(pnum * PAGES_PER_SECTION);
        start = (unsigned long)map;
        end = (unsigned long)(map + PAGES_PER_SECTION);

        if (vmemmap_populate(start, end, nid, altmap))
                return NULL;

        return map;
}

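/*
 * Populate the memory maps for sections [pnum_begin, pnum_end) on @nodeid.
 * A single PMD-aligned bootmem buffer covering map_count sections is
 * preallocated so the vmemmap can be laid out contiguously (and mapped with
 * huge pages where the architecture supports it); any unused remainder is
 * returned to memblock at the end.
 */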
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
        void *vmemmap_buf_start;

        size = ALIGN(size, PMD_SIZE);
        vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
                         PMD_SIZE, __pa(MAX_DMA_ADDRESS));

        if (vmemmap_buf_start) {
                vmemmap_buf = vmemmap_buf_start;
                vmemmap_buf_end = vmemmap_buf_start + size * map_count;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;

                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }

        if (vmemmap_buf_start) {
                /* free the unused remainder of the buffer */
                memblock_free_early(__pa(vmemmap_buf),
                                    vmemmap_buf_end - vmemmap_buf);
                vmemmap_buf = NULL;
                vmemmap_buf_end = NULL;
        }
}