/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
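
/*
 * With SPARSEMEM_EXTREME, mem_section is a table of root pointers and
 * each root (an array of SECTIONS_PER_ROOT entries) is allocated only
 * when sparse_index_init() first needs it; without EXTREME the whole
 * two-dimensional table is static. The lazy variant keeps the static
 * footprint small when the physical address space is sparsely populated.
 */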

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
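/*
 * A root is sized so that one allocation covers SECTIONS_PER_ROOT
 * sections. Before the slab allocator is up, the root must come from
 * memblock; later hot-adds go through kzalloc_node() so the array
 * lands on the node it describes whenever that node has memory.
 */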
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
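
/*
 * The shift keeps the nid clear of the low flag bits, so the two
 * helpers above round-trip: sparse_early_nid() on a section holding
 * sparse_encode_early_nid(nid) yields nid again, provided no flag
 * below SECTION_NID_SHIFT has been set yet.
 */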

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))
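
/*
 * next_present_section_nr() returns -1 once it has scanned past
 * __highest_present_section_nr, so the iterator above terminates
 * without walking the (possibly enormous) tail of absent sections.
 */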

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid);
			section_mark_present(ms);
		}
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
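
/*
 * In other words, the value stored by sparse_encode_mem_map() is the
 * mem_map biased by the section's first pfn: decoding adds
 * section_nr_to_pfn(pnum) back, so for any pfn in the section,
 * coded_mem_map + pfn is that pfn's struct page with no further
 * subtraction needed at lookup time.
 */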

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
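
	/*
	 * First try to stay inside the pgdat's own section: [goal, limit)
	 * spans exactly one section of physical address space. If that
	 * fails, retry with no upper bound; the usemap then lands
	 * elsewhere and only makes this section harder to remove.
	 */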
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}

	/*
	 * There is a circular dependency. Some platforms tolerate an
	 * un-removable section because they will just gather other
	 * removable sections for dynamic partitioning. Just report the
	 * un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}
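
	/*
	 * One contiguous block was allocated for the whole node; carve it
	 * up at size-byte strides, one slice per present section.
	 */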
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
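
/*
 * Populate the maps for a whole node in one go. Three attempts, in
 * order: a node-local remapped area (alloc_remap), one large memblock
 * allocation covering every present section, and finally per-section
 * allocations as a last resort.
 */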
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;

	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function to allocate usemap or memmap for a range of sections
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
				(void *, unsigned long, unsigned long,
				unsigned long, int), void *data)
{
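	/*
	 * Walk the present sections in ascending order and batch runs
	 * that belong to the same node, so alloc_func() is called once
	 * per per-node run rather than once per section.
	 */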
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of the run from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The mem_map is allocated from large pages (2M on x86-64) while
	 * each usemap is tiny (24 bytes). Interleaving a 2M-aligned map
	 * allocation with a 24-byte usemap allocation pushes every
	 * following map to the next 2M boundary, riddling a big system's
	 * memory with holes. So allocate all the usemaps first, then the
	 * maps, keeping the 2M pages contiguous.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("cannot allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("cannot allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif
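
	/*
	 * All allocations are done; wire each present section to its
	 * usemap and mem_map. The staging arrays are freed at the end,
	 * once everything is encoded into the mem_section entries.
	 */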
	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else /* !CONFIG_SPARSEMEM_VMEMMAP */
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		return (struct page *)pfn_to_kaddr(page_to_pfn(page));

	/* High-order allocation failed; fall back to vmalloc. */
	return vmalloc(memmap_size);
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed
		 * is in a logically offlined state: all of its pages are
		 * isolated from the page allocator. If the section's own
		 * memmap lives on that same section, it must not be freed
		 * here; otherwise the page allocator could hand the memory
		 * out again just before it is removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly
 * set. If this is <= 0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this: sparse_index_init() does its own, and
	 * besides, it does a kmalloc (so it can sleep).
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}
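
	/*
	 * Everything that can sleep is done. Publish the section under
	 * pgdat_resize_lock so a racing hot-add of the same section sees
	 * either nothing or a fully initialised entry.
	 */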
	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add.
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with other usemaps
	 * on the section that held the pgdat at boot time. Just keep it
	 * as is for now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}
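
/*
 * Tear-down order matters below: the section is unhooked under the
 * resize lock first, so no new lookup can reach the dying mem_map,
 * then hwpoison accounting is fixed up, and only then is the backing
 * storage freed.
 */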
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */