2 * mm/percpu.c - percpu memory allocator
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 * Copyright (C) 2017 Facebook Inc.
8 * Copyright (C) 2017 Dennis Zhou <dennisszhou@gmail.com>
10 * This file is released under the GPLv2 license.
12 * The percpu allocator handles both static and dynamic areas. Percpu
13 * areas are allocated in chunks which are divided into units. There is
14 * a 1-to-1 mapping for units to possible cpus. These units are grouped
15 * based on NUMA properties of the machine.
18 * ------------------- ------------------- ------------
19 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
20 * ------------------- ...... ------------------- .... ------------
22 * Allocation is done by offsets into a unit's address space. I.e., an
23 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
24 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
25 * and even sparse. Access is handled by configuring percpu base
26 * registers according to the cpu to unit mappings and offsetting the
27 * base address using pcpu_unit_size.
29 * There is special consideration for the first chunk which must handle
30 * the static percpu variables in the kernel image as allocation services
31 * are not online yet. In short, the first chunk is structured like so:
33 * <Static | [Reserved] | Dynamic>
35 * The static data is copied from the original section managed by the
36 * linker. The reserved section, if non-zero, primarily manages static
37 * percpu variables from kernel modules. Finally, the dynamic section
38 * takes care of normal allocations.
40 * The allocator organizes chunks into lists according to free size and
41 * tries to allocate from the fullest chunk first. Each chunk is managed
42 * by a bitmap with metadata blocks. The allocation map is updated on
43 * every allocation and free to reflect the current state while the boundary
44 * map is only updated on allocation. Each metadata block contains
45 * information to help mitigate the need to iterate over large portions
46 * of the bitmap. The reverse mapping from page to chunk is stored in
47 * the page's index. Lastly, units are lazily backed and grow in unison.
49 * There is a unique conversion that goes on here between bytes and bits.
50 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
51 * tracks the number of pages it is responsible for in nr_pages. Helper
52 * functions are used to convert between bytes, bits, and blocks.
53 * All hints are managed in bits unless explicitly stated.
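 *
 * For example (illustrative numbers only, assuming PCPU_MIN_ALLOC_SIZE is
 * 4 bytes and one metadata block covers a 4K page): a 48-byte area is
 * 48 / 4 == 12 bits in the allocation map, a page spans 4096 / 4 == 1024
 * bits, and bit offset 3000 lies in metadata block 3000 / 1024 == 2 at
 * block offset 3000 % 1024 == 952.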
55 * To use this allocator, arch code should do the following:
57 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
58 * regular address to percpu pointer and back if they need to be
59 * different from the default
61 * - use pcpu_setup_first_chunk() during percpu area initialization to
62 * setup the first chunk containing the kernel static percpu area
65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67 #include <linux/bitmap.h>
68 #include <linux/bootmem.h>
69 #include <linux/err.h>
70 #include <linux/lcm.h>
71 #include <linux/list.h>
72 #include <linux/log2.h>
74 #include <linux/module.h>
75 #include <linux/mutex.h>
76 #include <linux/percpu.h>
77 #include <linux/pfn.h>
78 #include <linux/slab.h>
79 #include <linux/spinlock.h>
80 #include <linux/vmalloc.h>
81 #include <linux/workqueue.h>
82 #include <linux/kmemleak.h>
84 #include <asm/cacheflush.h>
85 #include <asm/sections.h>
86 #include <asm/tlbflush.h>
89 #define CREATE_TRACE_POINTS
90 #include <trace/events/percpu.h>
92 #include "percpu-internal.h"
94 /* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
95 #define PCPU_SLOT_BASE_SHIFT 5
97 #define PCPU_EMPTY_POP_PAGES_LOW 2
98 #define PCPU_EMPTY_POP_PAGES_HIGH 4
101 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
102 #ifndef __addr_to_pcpu_ptr
103 #define __addr_to_pcpu_ptr(addr) \
104 (void __percpu *)((unsigned long)(addr) - \
105 (unsigned long)pcpu_base_addr + \
106 (unsigned long)__per_cpu_start)
108 #ifndef __pcpu_ptr_to_addr
109 #define __pcpu_ptr_to_addr(ptr) \
110 (void __force *)((unsigned long)(ptr) + \
111 (unsigned long)pcpu_base_addr - \
112 (unsigned long)__per_cpu_start)
114 #else /* CONFIG_SMP */
115 /* on UP, it's always identity mapped */
116 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
117 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
118 #endif /* CONFIG_SMP */
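/*
 * Illustrative sketch of the default translation above (not part of the
 * allocator): the two macros are exact inverses, so for an allocation at
 * offset @off in a chunk based at @base:
 *
 *	void *addr = base + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 *	__pcpu_ptr_to_addr(ptr) == addr
 *
 * The pointer is rebased against __per_cpu_start so that per_cpu_ptr() and
 * friends can later apply the accessing cpu's offset to it.
 */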
120 static int pcpu_unit_pages __ro_after_init;
121 static int pcpu_unit_size __ro_after_init;
122 static int pcpu_nr_units __ro_after_init;
123 static int pcpu_atom_size __ro_after_init;
124 int pcpu_nr_slots __ro_after_init;
125 static size_t pcpu_chunk_struct_size __ro_after_init;
127 /* cpus with the lowest and highest unit addresses */
128 static unsigned int pcpu_low_unit_cpu __ro_after_init;
129 static unsigned int pcpu_high_unit_cpu __ro_after_init;
131 /* the address of the first chunk which starts with the kernel static area */
132 void *pcpu_base_addr __ro_after_init;
133 EXPORT_SYMBOL_GPL(pcpu_base_addr);
135 static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
136 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
138 /* group information, used for vm allocation */
139 static int pcpu_nr_groups __ro_after_init;
140 static const unsigned long *pcpu_group_offsets __ro_after_init;
141 static const size_t *pcpu_group_sizes __ro_after_init;
144 * The first chunk which always exists. Note that unlike other
145 * chunks, this one can be allocated and mapped in several different
146 * ways and thus often doesn't live in the vmalloc area.
148 struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
151 * Optional reserved chunk. This chunk reserves part of the first
152 * chunk and serves it for reserved allocations. When the reserved
153 * region doesn't exist, the following variable is NULL.
155 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
157 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
158 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
160 struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
162 /* chunks which need their map areas extended, protected by pcpu_lock */
163 static LIST_HEAD(pcpu_map_extend_chunks);
166 * The number of empty populated pages, protected by pcpu_lock. The
167 * reserved chunk doesn't contribute to the count.
169 int pcpu_nr_empty_pop_pages;
172 * Balance work is used to populate or destroy chunks asynchronously. We
173 * try to keep the number of populated free pages between
174 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one empty chunk.
177 static void pcpu_balance_workfn(struct work_struct *work);
178 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
179 static bool pcpu_async_enabled __read_mostly;
180 static bool pcpu_atomic_alloc_failed;
182 static void pcpu_schedule_balance_work(void)
184 if (pcpu_async_enabled)
185 schedule_work(&pcpu_balance_work);
189 * pcpu_addr_in_chunk - check if the address is served from this chunk
190 * @chunk: chunk of interest
191 * @addr: percpu address
194 * True if the address is served from this chunk.
196 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
198 void *start_addr, *end_addr;
203 start_addr = chunk->base_addr + chunk->start_offset;
204 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
207 return addr >= start_addr && addr < end_addr;
210 static int __pcpu_size_to_slot(int size)
212 int highbit = fls(size); /* size is in bytes */
213 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
216 static int pcpu_size_to_slot(int size)
218 if (size == pcpu_unit_size)
219 return pcpu_nr_slots - 1;
220 return __pcpu_size_to_slot(size);
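/*
 * Illustrative example (not used by the code): with PCPU_SLOT_BASE_SHIFT
 * of 5, a chunk with 1024 bytes left free has fls(1024) == 11 and lands in
 * slot max(11 - 5 + 2, 1) == 8, while a completely free chunk
 * (free_bytes == pcpu_unit_size) always goes to the last slot,
 * pcpu_nr_slots - 1, which is where the balance worker looks for chunks to
 * reclaim.
 */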
223 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
225 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0)
228 return pcpu_size_to_slot(chunk->free_bytes);
231 /* set the pointer to a chunk in a page struct */
232 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
234 page->index = (unsigned long)pcpu;
237 /* obtain pointer to a chunk from a page struct */
238 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
240 return (struct pcpu_chunk *)page->index;
243 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
245 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
248 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
250 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
253 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
254 unsigned int cpu, int page_idx)
256 return (unsigned long)chunk->base_addr +
257 pcpu_unit_page_offset(cpu, page_idx);
260 static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
262 *rs = find_next_zero_bit(bitmap, end, *rs);
263 *re = find_next_bit(bitmap, end, *rs + 1);
266 static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
268 *rs = find_next_bit(bitmap, end, *rs);
269 *re = find_next_zero_bit(bitmap, end, *rs + 1);
273 * Bitmap region iterators. Iterates over the bitmap between
274 * [@start, @end) in @chunk. @rs and @re should be integer variables
275 * and will be set to start and end index of the current free region.
277 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \
278 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
280 (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
282 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \
283 for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \
285 (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
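/*
 * Example usage (sketch only): walking the unpopulated page regions of a
 * chunk.  @rs and @re are plain ints as noted above.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages)
 *		pr_debug("pages [%d, %d) not populated\n", rs, re);
 */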
288 * The following are helper functions to help access bitmaps and convert
289 * between bitmap offsets to address offsets.
291 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
293 return chunk->alloc_map +
294 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
297 static unsigned long pcpu_off_to_block_index(int off)
299 return off / PCPU_BITMAP_BLOCK_BITS;
302 static unsigned long pcpu_off_to_block_off(int off)
304 return off & (PCPU_BITMAP_BLOCK_BITS - 1);
307 static unsigned long pcpu_block_off_to_off(int index, int off)
309 return index * PCPU_BITMAP_BLOCK_BITS + off;
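/*
 * Worked example for the three helpers above (illustrative, assuming
 * PCPU_BITMAP_BLOCK_BITS == 1024): chunk offset 2500 maps to block index
 * 2500 / 1024 == 2 and block offset 2500 & 1023 == 452, and
 * pcpu_block_off_to_off(2, 452) recovers 2500.
 */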
313 * pcpu_next_md_free_region - finds the next hint free area
314 * @chunk: chunk of interest
315 * @bit_off: chunk offset
316 * @bits: size of free area
318 * Helper function for pcpu_for_each_md_free_region. It checks
319 * block->contig_hint and performs aggregation across blocks to find the
320 * next hint. It modifies bit_off and bits in-place to be consumed in the
323 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
326 int i = pcpu_off_to_block_index(*bit_off);
327 int block_off = pcpu_off_to_block_off(*bit_off);
328 struct pcpu_block_md *block;
331 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
333 /* handles contig area across blocks */
335 *bits += block->left_free;
336 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
342 * This checks three things. First, is there a contig_hint to
343 * check. Second, have we checked this hint before by
344 * comparing the block_off. Third, is this the same as the
345 * right contig hint. In the last case, it spills over into
346 * the next block and should be handled by the contig area
347 * across blocks code.
349 *bits = block->contig_hint;
350 if (*bits && block->contig_hint_start >= block_off &&
351 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
352 *bit_off = pcpu_block_off_to_off(i,
353 block->contig_hint_start);
356 /* reset to satisfy the second predicate above */
359 *bits = block->right_free;
360 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
365 * pcpu_next_fit_region - finds fit areas for a given allocation request
366 * @chunk: chunk of interest
367 * @alloc_bits: size of allocation
368 * @align: alignment of area (max PAGE_SIZE)
369 * @bit_off: chunk offset
370 * @bits: size of free area
372 * Finds the next free region that is viable for use with a given size and
373 * alignment. This only returns if there is a valid area to be used for this
374 * allocation. block->first_free is returned if the allocation request fits
375 * within the block to see if the request can be fulfilled prior to the contig hint.
378 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
379 int align, int *bit_off, int *bits)
381 int i = pcpu_off_to_block_index(*bit_off);
382 int block_off = pcpu_off_to_block_off(*bit_off);
383 struct pcpu_block_md *block;
386 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
388 /* handles contig area across blocks */
390 *bits += block->left_free;
391 if (*bits >= alloc_bits)
393 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
397 /* check block->contig_hint */
398 *bits = ALIGN(block->contig_hint_start, align) -
399 block->contig_hint_start;
401 * This uses the block offset to determine if this has been
402 * checked in the prior iteration.
404 if (block->contig_hint &&
405 block->contig_hint_start >= block_off &&
406 block->contig_hint >= *bits + alloc_bits) {
407 *bits += alloc_bits + block->contig_hint_start -
409 *bit_off = pcpu_block_off_to_off(i, block->first_free);
412 /* reset to satisfy the second predicate above */
415 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
417 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
418 *bit_off = pcpu_block_off_to_off(i, *bit_off);
419 if (*bits >= alloc_bits)
423 /* no valid offsets were found - fail condition */
424 *bit_off = pcpu_chunk_map_bits(chunk);
428 * Metadata free area iterators. These perform aggregation of free areas
429 * based on the metadata blocks and return the offset @bit_off and size in
430 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
431 * a fit is found for the allocation request.
433 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
434 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
435 (bit_off) < pcpu_chunk_map_bits((chunk)); \
436 (bit_off) += (bits) + 1, \
437 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
439 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
440 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
442 (bit_off) < pcpu_chunk_map_bits((chunk)); \
443 (bit_off) += (bits), \
444 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
448 * pcpu_mem_zalloc - allocate memory
449 * @size: bytes to allocate
450 * @gfp: allocation flags
452 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
453 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
454 * This is to facilitate passing through whitelisted flags. The
455 * returned memory is always zeroed.
458 * Pointer to the allocated area on success, NULL on failure.
460 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
462 if (WARN_ON_ONCE(!slab_is_available()))
465 if (size <= PAGE_SIZE)
466 return kzalloc(size, gfp);
468 return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
472 * pcpu_mem_free - free memory
473 * @ptr: memory to free
475 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
477 static void pcpu_mem_free(void *ptr)
483 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
484 * @chunk: chunk of interest
485 * @oslot: the previous slot it was on
487 * This function is called after an allocation or free changed @chunk.
488 * New slot according to the changed state is determined and @chunk is
489 * moved to the slot. Note that the reserved chunk is never put on
495 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
497 int nslot = pcpu_chunk_slot(chunk);
499 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
501 list_move(&chunk->list, &pcpu_slot[nslot]);
503 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
508 * pcpu_cnt_pop_pages - counts populated backing pages in range
509 * @chunk: chunk of interest
510 * @bit_off: start offset
511 * @bits: size of area to check
513 * Calculates the number of populated pages in the region
514 * [page_start, page_end). This keeps track of how many empty populated
515 * pages are available and decides whether async work should be scheduled.
518 * The nr of populated pages.
520 static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
523 int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE);
524 int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
526 if (page_start >= page_end)
530 * bitmap_weight counts the number of bits set in a bitmap up to
531 * the specified number of bits. This is counting the populated
532 * pages up to page_end and then subtracting the populated pages
533 * up to page_start to count the populated pages in
534 * [page_start, page_end).
536 return bitmap_weight(chunk->populated, page_end) -
537 bitmap_weight(chunk->populated, page_start);
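/*
 * Example of the page clipping above (illustrative, assuming 4K pages and
 * PCPU_MIN_ALLOC_SIZE == 4): bits [100, 3000) cover bytes [400, 12000), so
 * page_start == PFN_UP(400) == 1 and page_end == PFN_DOWN(12000) == 2.
 * Only page 1 is fully contained in the area and counted; the partially
 * covered pages 0 and 2 are ignored.
 */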
541 * pcpu_chunk_update - updates the chunk metadata given a free area
542 * @chunk: chunk of interest
543 * @bit_off: chunk offset
544 * @bits: size of free area
546 * This updates the chunk's contig hint and starting offset given a free area.
547 * Choose the best starting offset if the contig hint is equal.
549 static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
551 if (bits > chunk->contig_bits) {
552 chunk->contig_bits_start = bit_off;
553 chunk->contig_bits = bits;
554 } else if (bits == chunk->contig_bits && chunk->contig_bits_start &&
556 __ffs(bit_off) > __ffs(chunk->contig_bits_start))) {
557 /* use the start with the best alignment */
558 chunk->contig_bits_start = bit_off;
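/*
 * Example of the tie-break above (illustrative): __ffs() gives the natural
 * alignment of an offset in bits, so between two equally sized free areas
 * starting at bit offsets 384 (__ffs == 7) and 512 (__ffs == 9), the one at
 * 512 is preferred as it can satisfy more strongly aligned requests.
 */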
563 * pcpu_chunk_refresh_hint - updates metadata about a chunk
564 * @chunk: chunk of interest
566 * Iterates over the metadata blocks to find the largest contig area.
567 * It also counts the populated pages and uses the delta to update the global count. It updates:
572 * chunk->contig_bits_start
573 * nr_empty_pop_pages (chunk and global)
575 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
577 int bit_off, bits, nr_empty_pop_pages;
580 chunk->contig_bits = 0;
582 bit_off = chunk->first_bit;
583 bits = nr_empty_pop_pages = 0;
584 pcpu_for_each_md_free_region(chunk, bit_off, bits) {
585 pcpu_chunk_update(chunk, bit_off, bits);
587 nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, bit_off, bits);
591 * Keep track of nr_empty_pop_pages.
593 * The chunk maintains the previous number of free pages it held,
594 * so the delta is used to update the global counter. The reserved
595 * chunk is not part of the free page count as they are populated
596 * at init and are special to serving reserved allocations.
598 if (chunk != pcpu_reserved_chunk)
599 pcpu_nr_empty_pop_pages +=
600 (nr_empty_pop_pages - chunk->nr_empty_pop_pages);
602 chunk->nr_empty_pop_pages = nr_empty_pop_pages;
606 * pcpu_block_update - updates a block given a free area
607 * @block: block of interest
608 * @start: start offset in block
609 * @end: end offset in block
611 * Updates a block given a known free area. The region [start, end) is
612 * expected to be the entirety of the free area within a block. Chooses
613 * the best starting offset if the contig hints are equal.
615 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
617 int contig = end - start;
619 block->first_free = min(block->first_free, start);
621 block->left_free = contig;
623 if (end == PCPU_BITMAP_BLOCK_BITS)
624 block->right_free = contig;
626 if (contig > block->contig_hint) {
627 block->contig_hint_start = start;
628 block->contig_hint = contig;
629 } else if (block->contig_hint_start && contig == block->contig_hint &&
630 (!start || __ffs(start) > __ffs(block->contig_hint_start))) {
631 /* use the start with the best alignment */
632 block->contig_hint_start = start;
637 * pcpu_block_refresh_hint - rescans a block and refreshes its metadata
638 * @chunk: chunk of interest
639 * @index: index of the metadata block
641 * Scans over the block beginning at first_free and updates the block
642 * metadata accordingly.
644 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
646 struct pcpu_block_md *block = chunk->md_blocks + index;
647 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
648 int rs, re; /* region start, region end */
651 block->contig_hint = 0;
652 block->left_free = block->right_free = 0;
654 /* iterate over free areas and update the contig hints */
655 pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free,
656 PCPU_BITMAP_BLOCK_BITS) {
657 pcpu_block_update(block, rs, re);
662 * pcpu_block_update_hint_alloc - update hint on allocation path
663 * @chunk: chunk of interest
664 * @bit_off: chunk offset
665 * @bits: size of request
667 * Updates metadata for the allocation path. The metadata only has to be
668 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
669 * scans are required if the block's contig hint is broken.
671 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
674 struct pcpu_block_md *s_block, *e_block, *block;
675 int s_index, e_index; /* block indexes of the freed allocation */
676 int s_off, e_off; /* block offsets of the freed allocation */
679 * Calculate per block offsets.
680 * The calculation uses an inclusive range, but the resulting offsets
681 * are [start, end). e_index always points to the last block in the
684 s_index = pcpu_off_to_block_index(bit_off);
685 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
686 s_off = pcpu_off_to_block_off(bit_off);
687 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
689 s_block = chunk->md_blocks + s_index;
690 e_block = chunk->md_blocks + e_index;
694 * block->first_free must be updated if the allocation takes its place.
695 * If the allocation breaks the contig_hint, a scan is required to
698 if (s_off == s_block->first_free)
699 s_block->first_free = find_next_zero_bit(
700 pcpu_index_alloc_map(chunk, s_index),
701 PCPU_BITMAP_BLOCK_BITS,
704 if (s_off >= s_block->contig_hint_start &&
705 s_off < s_block->contig_hint_start + s_block->contig_hint) {
706 /* block contig hint is broken - scan to fix it */
707 pcpu_block_refresh_hint(chunk, s_index);
709 /* update left and right contig manually */
710 s_block->left_free = min(s_block->left_free, s_off);
711 if (s_index == e_index)
712 s_block->right_free = min_t(int, s_block->right_free,
713 PCPU_BITMAP_BLOCK_BITS - e_off);
715 s_block->right_free = 0;
721 if (s_index != e_index) {
723 * When the allocation is across blocks, the end is along
724 * the left part of the e_block.
726 e_block->first_free = find_next_zero_bit(
727 pcpu_index_alloc_map(chunk, e_index),
728 PCPU_BITMAP_BLOCK_BITS, e_off);
730 if (e_off == PCPU_BITMAP_BLOCK_BITS) {
731 /* reset the block */
734 if (e_off > e_block->contig_hint_start) {
735 /* contig hint is broken - scan to fix it */
736 pcpu_block_refresh_hint(chunk, e_index);
738 e_block->left_free = 0;
739 e_block->right_free =
740 min_t(int, e_block->right_free,
741 PCPU_BITMAP_BLOCK_BITS - e_off);
745 /* update in-between md_blocks */
746 for (block = s_block + 1; block < e_block; block++) {
747 block->contig_hint = 0;
748 block->left_free = 0;
749 block->right_free = 0;
754 * The only time a full chunk scan is required is if the chunk
755 * contig hint is broken. Otherwise, it means a smaller space
756 * was used and therefore the chunk contig hint is still correct.
758 if (bit_off >= chunk->contig_bits_start &&
759 bit_off < chunk->contig_bits_start + chunk->contig_bits)
760 pcpu_chunk_refresh_hint(chunk);
764 * pcpu_block_update_hint_free - updates the block hints on the free path
765 * @chunk: chunk of interest
766 * @bit_off: chunk offset
767 * @bits: size of request
769 * Updates metadata for the free path. This avoids a blind block
770 * refresh by making use of the block contig hints. If this fails, it scans
771 * forward and backward to determine the extent of the free area. This is
772 * capped at the boundary of blocks.
774 * A chunk update is triggered if a page becomes free, a block becomes free,
775 * or the free spans across blocks. This tradeoff is to minimize iterating
776 * over the block metadata to update chunk->contig_bits. chunk->contig_bits
777 * may be off by up to a page, but it will never be more than the available
778 * space. If the contig hint is contained in one block, it will be accurate.
780 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
783 struct pcpu_block_md *s_block, *e_block, *block;
784 int s_index, e_index; /* block indexes of the freed allocation */
785 int s_off, e_off; /* block offsets of the freed allocation */
786 int start, end; /* start and end of the whole free area */
789 * Calculate per block offsets.
790 * The calculation uses an inclusive range, but the resulting offsets
791 * are [start, end). e_index always points to the last block in the
794 s_index = pcpu_off_to_block_index(bit_off);
795 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
796 s_off = pcpu_off_to_block_off(bit_off);
797 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
799 s_block = chunk->md_blocks + s_index;
800 e_block = chunk->md_blocks + e_index;
803 * Check if the freed area aligns with the block->contig_hint.
804 * If it does, then the scan to find the beginning/end of the
805 * larger free area can be avoided.
807 * start and end refer to beginning and end of the free area
808 * within their respective blocks. This is not necessarily
809 * the entire free area as it may span blocks past the beginning
810 * or end of the block.
813 if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
814 start = s_block->contig_hint_start;
817 * Scan backwards to find the extent of the free area.
818 * find_last_bit returns the starting bit, so if the start bit
819 * is returned, that means there was no last bit and the
820 * remainder of the chunk is free.
822 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
824 start = (start == l_bit) ? 0 : l_bit + 1;
828 if (e_off == e_block->contig_hint_start)
829 end = e_block->contig_hint_start + e_block->contig_hint;
831 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
832 PCPU_BITMAP_BLOCK_BITS, end);
835 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
836 pcpu_block_update(s_block, start, e_off);
838 /* freeing spans more than one block */
839 if (s_index != e_index) {
841 pcpu_block_update(e_block, 0, end);
843 /* reset md_blocks in the middle */
844 for (block = s_block + 1; block < e_block; block++) {
845 block->first_free = 0;
846 block->contig_hint_start = 0;
847 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
848 block->left_free = PCPU_BITMAP_BLOCK_BITS;
849 block->right_free = PCPU_BITMAP_BLOCK_BITS;
854 * Refresh chunk metadata when the free makes a page free, a block
855 * free, or spans across blocks. The contig hint may be off by up to
856 * a page, but if the hint is contained in a block, it will be accurate
857 * with the else condition below.
859 if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) >
860 ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) ||
862 pcpu_chunk_refresh_hint(chunk);
864 pcpu_chunk_update(chunk, pcpu_block_off_to_off(s_index, start),
865 s_block->contig_hint);
869 * pcpu_is_populated - determines if the region is populated
870 * @chunk: chunk of interest
871 * @bit_off: chunk offset
872 * @bits: size of area
873 * @next_off: return value for the next offset to start searching
875 * For atomic allocations, check if the backing pages are populated.
878 * Bool if the backing pages are populated.
879 * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
881 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
884 int page_start, page_end, rs, re;
886 page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
887 page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
890 pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
894 *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
899 * pcpu_find_block_fit - finds the block index to start searching
900 * @chunk: chunk of interest
901 * @alloc_bits: size of request in allocation units
902 * @align: alignment of area (max PAGE_SIZE bytes)
903 * @pop_only: use populated regions only
905 * Given a chunk and an allocation spec, find the offset to begin searching
906 * for a free region. This iterates over the bitmap metadata blocks to
907 * find an offset that will be guaranteed to fit the requirements. It is
908 * not quite first fit as if the allocation does not fit in the contig hint
909 * of a block or chunk, it is skipped. This errs on the side of caution
910 * to prevent excess iteration. Poor alignment can cause the allocator to
911 * skip over blocks and chunks that have valid free areas.
914 * The offset in the bitmap to begin searching.
915 * -1 if no offset is found.
917 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
918 size_t align, bool pop_only)
920 int bit_off, bits, next_off;
923 * Check to see if the allocation can fit in the chunk's contig hint.
924 * This is an optimization to prevent scanning by assuming if it
925 * cannot fit in the global hint, there is memory pressure and creating
926 * a new chunk would happen soon.
928 bit_off = ALIGN(chunk->contig_bits_start, align) -
929 chunk->contig_bits_start;
930 if (bit_off + alloc_bits > chunk->contig_bits)
933 bit_off = chunk->first_bit;
935 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
936 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
944 if (bit_off == pcpu_chunk_map_bits(chunk))
951 * pcpu_alloc_area - allocates an area from a pcpu_chunk
952 * @chunk: chunk of interest
953 * @alloc_bits: size of request in allocation units
954 * @align: alignment of area (max PAGE_SIZE)
955 * @start: bit_off to start searching
957 * This function takes in a @start offset to begin searching to fit an
958 * allocation of @alloc_bits with alignment @align. It needs to scan
959 * the allocation map because if it fits within the block's contig hint,
960 * @start will be block->first_free. This is an attempt to fill the
961 * allocation prior to breaking the contig hint. The allocation and
962 * boundary maps are updated accordingly if it confirms a valid free area.
966 * Allocated addr offset in @chunk on success.
967 * -1 if no matching area is found.
969 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
970 size_t align, int start)
972 size_t align_mask = (align) ? (align - 1) : 0;
973 int bit_off, end, oslot;
975 lockdep_assert_held(&pcpu_lock);
977 oslot = pcpu_chunk_slot(chunk);
980 * Search to find a fit.
982 end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
983 bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
984 alloc_bits, align_mask);
988 /* update alloc map */
989 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
991 /* update boundary map */
992 set_bit(bit_off, chunk->bound_map);
993 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
994 set_bit(bit_off + alloc_bits, chunk->bound_map);
996 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
998 /* update first free bit */
999 if (bit_off == chunk->first_bit)
1000 chunk->first_bit = find_next_zero_bit(
1002 pcpu_chunk_map_bits(chunk),
1003 bit_off + alloc_bits);
1005 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1007 pcpu_chunk_relocate(chunk, oslot);
1009 return bit_off * PCPU_MIN_ALLOC_SIZE;
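/*
 * Worked example of the map updates above (illustrative, assuming
 * PCPU_MIN_ALLOC_SIZE == 4): a 12-bit (48-byte) allocation placed at
 * bit_off 100 sets alloc_map bits [100, 112), sets bound_map bit 100,
 * clears bound_map bits (100, 112) and sets bound_map bit 112.  The bits
 * set in bound_map mark allocation boundaries so the free path can recover
 * the allocation size from the offset alone.
 */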
1013 * pcpu_free_area - frees the corresponding offset
1014 * @chunk: chunk of interest
1015 * @off: addr offset into chunk
1017 * This function determines the size of an allocation to free using
1018 * the boundary bitmap and clears the allocation map.
1020 static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
1022 int bit_off, bits, end, oslot;
1024 lockdep_assert_held(&pcpu_lock);
1025 pcpu_stats_area_dealloc(chunk);
1027 oslot = pcpu_chunk_slot(chunk);
1029 bit_off = off / PCPU_MIN_ALLOC_SIZE;
1031 /* find end index */
1032 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1034 bits = end - bit_off;
1035 bitmap_clear(chunk->alloc_map, bit_off, bits);
1037 /* update metadata */
1038 chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
1040 /* update first free bit */
1041 chunk->first_bit = min(chunk->first_bit, bit_off);
1043 pcpu_block_update_hint_free(chunk, bit_off, bits);
1045 pcpu_chunk_relocate(chunk, oslot);
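/*
 * Continuing the example from pcpu_alloc_area() (illustrative): freeing
 * offset 400 gives bit_off 400 / 4 == 100; the next bound_map bit past 100
 * is 112, so bits == 12 and alloc_map bits [100, 112) are cleared again.
 */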
1048 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1050 struct pcpu_block_md *md_block;
1052 for (md_block = chunk->md_blocks;
1053 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1055 md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1056 md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
1057 md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
1062 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1063 * @tmp_addr: the start of the region served
1064 * @map_size: size of the region served
1066 * This is responsible for creating the chunks that serve the first chunk. The
1067 * base_addr is @tmp_addr rounded down to a page boundary while the region end is
1068 * page aligned up. Offsets are tracked to determine the region served. All
1069 * this is done to appease the bitmap allocator in avoiding partial blocks.
1072 * Chunk serving the region at @tmp_addr of @map_size.
1074 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1077 struct pcpu_chunk *chunk;
1078 unsigned long aligned_addr, lcm_align;
1079 int start_offset, offset_bits, region_size, region_bits;
1081 /* region calculations */
1082 aligned_addr = tmp_addr & PAGE_MASK;
1084 start_offset = tmp_addr - aligned_addr;
1087 * Align the end of the region with the LCM of PAGE_SIZE and
1088 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1091 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1092 region_size = ALIGN(start_offset + map_size, lcm_align);
1094 /* allocate chunk */
1095 chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
1096 BITS_TO_LONGS(region_size >> PAGE_SHIFT),
1099 INIT_LIST_HEAD(&chunk->list);
1101 chunk->base_addr = (void *)aligned_addr;
1102 chunk->start_offset = start_offset;
1103 chunk->end_offset = region_size - chunk->start_offset - map_size;
1105 chunk->nr_pages = region_size >> PAGE_SHIFT;
1106 region_bits = pcpu_chunk_map_bits(chunk);
1108 chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
1109 sizeof(chunk->alloc_map[0]), 0);
1110 chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
1111 sizeof(chunk->bound_map[0]), 0);
1112 chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
1113 sizeof(chunk->md_blocks[0]), 0);
1114 pcpu_init_md_blocks(chunk);
1116 /* manage populated page bitmap */
1117 chunk->immutable = true;
1118 bitmap_fill(chunk->populated, chunk->nr_pages);
1119 chunk->nr_populated = chunk->nr_pages;
1120 chunk->nr_empty_pop_pages =
1121 pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE,
1122 map_size / PCPU_MIN_ALLOC_SIZE);
1124 chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE;
1125 chunk->free_bytes = map_size;
1127 if (chunk->start_offset) {
1128 /* hide the beginning of the bitmap */
1129 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1130 bitmap_set(chunk->alloc_map, 0, offset_bits);
1131 set_bit(0, chunk->bound_map);
1132 set_bit(offset_bits, chunk->bound_map);
1134 chunk->first_bit = offset_bits;
1136 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1139 if (chunk->end_offset) {
1140 /* hide the end of the bitmap */
1141 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1142 bitmap_set(chunk->alloc_map,
1143 pcpu_chunk_map_bits(chunk) - offset_bits,
1145 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1147 set_bit(region_bits, chunk->bound_map);
1149 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1150 - offset_bits, offset_bits);
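/*
 * Illustrative layout (assuming 4K pages and PCPU_BITMAP_BLOCK_SIZE ==
 * PAGE_SIZE, so lcm_align == PAGE_SIZE): for tmp_addr == base + 0x234 and
 * map_size == 0x3000, base_addr is base, start_offset is 0x234,
 * region_size is ALIGN(0x234 + 0x3000, 0x1000) == 0x4000 and end_offset is
 * 0x4000 - 0x234 - 0x3000 == 0xdcc.  The start_offset and end_offset bits
 * are marked allocated above so the served region is exactly
 * [tmp_addr, tmp_addr + map_size).
 */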
1156 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1158 struct pcpu_chunk *chunk;
1161 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1165 INIT_LIST_HEAD(&chunk->list);
1166 chunk->nr_pages = pcpu_unit_pages;
1167 region_bits = pcpu_chunk_map_bits(chunk);
1169 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1170 sizeof(chunk->alloc_map[0]), gfp);
1171 if (!chunk->alloc_map)
1172 goto alloc_map_fail;
1174 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1175 sizeof(chunk->bound_map[0]), gfp);
1176 if (!chunk->bound_map)
1177 goto bound_map_fail;
1179 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1180 sizeof(chunk->md_blocks[0]), gfp);
1181 if (!chunk->md_blocks)
1182 goto md_blocks_fail;
1184 pcpu_init_md_blocks(chunk);
1187 chunk->contig_bits = region_bits;
1188 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1193 pcpu_mem_free(chunk->bound_map);
1195 pcpu_mem_free(chunk->alloc_map);
1197 pcpu_mem_free(chunk);
1202 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1206 pcpu_mem_free(chunk->bound_map);
1207 pcpu_mem_free(chunk->alloc_map);
1208 pcpu_mem_free(chunk);
1212 * pcpu_chunk_populated - post-population bookkeeping
1213 * @chunk: pcpu_chunk which got populated
1214 * @page_start: the start page
1215 * @page_end: the end page
1216 * @for_alloc: if this is to populate for allocation
1218 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1219 * the bookkeeping information accordingly. Must be called after each
1220 * successful population.
1222 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
1223 * is to serve an allocation in that area.
1225 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1226 int page_end, bool for_alloc)
1228 int nr = page_end - page_start;
1230 lockdep_assert_held(&pcpu_lock);
1232 bitmap_set(chunk->populated, page_start, nr);
1233 chunk->nr_populated += nr;
1236 chunk->nr_empty_pop_pages += nr;
1237 pcpu_nr_empty_pop_pages += nr;
1242 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1243 * @chunk: pcpu_chunk which got depopulated
1244 * @page_start: the start page
1245 * @page_end: the end page
1247 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1248 * Update the bookkeeping information accordingly. Must be called after
1249 * each successful depopulation.
1251 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1252 int page_start, int page_end)
1254 int nr = page_end - page_start;
1256 lockdep_assert_held(&pcpu_lock);
1258 bitmap_clear(chunk->populated, page_start, nr);
1259 chunk->nr_populated -= nr;
1260 chunk->nr_empty_pop_pages -= nr;
1261 pcpu_nr_empty_pop_pages -= nr;
1265 * Chunk management implementation.
1267 * To allow different implementations, chunk alloc/free and
1268 * [de]population are implemented in a separate file which is pulled
1269 * into this file and compiled together. The following functions
1270 * should be implemented.
1272 * pcpu_populate_chunk - populate the specified range of a chunk
1273 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1274 * pcpu_create_chunk - create a new chunk
1275 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1276 * pcpu_addr_to_page - translate address to the corresponding struct page
1277 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1279 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1280 int page_start, int page_end, gfp_t gfp);
1281 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1282 int page_start, int page_end);
1283 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1284 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1285 static struct page *pcpu_addr_to_page(void *addr);
1286 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1288 #ifdef CONFIG_NEED_PER_CPU_KM
1289 #include "percpu-km.c"
1291 #include "percpu-vm.c"
1295 * pcpu_chunk_addr_search - determine chunk containing specified address
1296 * @addr: address for which the chunk needs to be determined.
1298 * This is an internal function that handles all but static allocations.
1299 * Static percpu address values should never be passed into the allocator.
1302 * The address of the found chunk.
1304 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1306 /* is it in the dynamic region (first chunk)? */
1307 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1308 return pcpu_first_chunk;
1310 /* is it in the reserved region? */
1311 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1312 return pcpu_reserved_chunk;
1315 * The address is relative to unit0 which might be unused and
1316 * thus unmapped. Offset the address to the unit space of the
1317 * current processor before looking it up in the vmalloc
1318 * space. Note that any possible cpu id can be used here, so
1319 * there's no need to worry about preemption or cpu hotplug.
1321 addr += pcpu_unit_offsets[raw_smp_processor_id()];
1322 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1326 * pcpu_alloc - the percpu allocator
1327 * @size: size of area to allocate in bytes
1328 * @align: alignment of area (max PAGE_SIZE)
1329 * @reserved: allocate from the reserved chunk if available
1330 * @gfp: allocation flags
1332 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1333 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1334 * then no warning will be triggered on invalid or failed allocation requests.
1338 * Percpu pointer to the allocated area on success, NULL on failure.
1340 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1343 /* whitelisted flags that can be passed to the backing allocators */
1344 gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1345 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1346 bool do_warn = !(gfp & __GFP_NOWARN);
1347 static int warn_limit = 10;
1348 struct pcpu_chunk *chunk;
1350 int slot, off, cpu, ret;
1351 unsigned long flags;
1353 size_t bits, bit_align;
1356 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1357 * therefore alignment must be a minimum of that many bytes.
1358 * An allocation may have internal fragmentation of up to
1359 * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding up.
1361 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1362 align = PCPU_MIN_ALLOC_SIZE;
1364 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1365 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1366 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
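/*
 * Example (illustrative, assuming PCPU_MIN_ALLOC_SIZE == 4): a request for
 * 13 bytes with 8-byte alignment becomes size == 16, bits == 4 and
 * bit_align == 2; the allocator then works purely in these 4-byte units.
 */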
1368 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1369 !is_power_of_2(align))) {
1370 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1376 mutex_lock(&pcpu_alloc_mutex);
1378 spin_lock_irqsave(&pcpu_lock, flags);
1380 /* serve reserved allocations from the reserved chunk if available */
1381 if (reserved && pcpu_reserved_chunk) {
1382 chunk = pcpu_reserved_chunk;
1384 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1386 err = "alloc from reserved chunk failed";
1390 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1394 err = "alloc from reserved chunk failed";
1399 /* search through normal chunks */
1400 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1401 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1402 off = pcpu_find_block_fit(chunk, bits, bit_align,
1407 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1414 spin_unlock_irqrestore(&pcpu_lock, flags);
1417 * No space left. Create a new chunk. We don't want multiple
1418 * tasks to create chunks simultaneously. Serialize and create iff
1419 * there's still no empty chunk after grabbing the mutex.
1422 err = "atomic alloc failed, no space left";
1426 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1427 chunk = pcpu_create_chunk(pcpu_gfp);
1429 err = "failed to allocate new chunk";
1433 spin_lock_irqsave(&pcpu_lock, flags);
1434 pcpu_chunk_relocate(chunk, -1);
1436 spin_lock_irqsave(&pcpu_lock, flags);
1442 pcpu_stats_area_alloc(chunk, size);
1443 spin_unlock_irqrestore(&pcpu_lock, flags);
1445 /* populate if not all pages are already there */
1447 int page_start, page_end, rs, re;
1449 page_start = PFN_DOWN(off);
1450 page_end = PFN_UP(off + size);
1452 pcpu_for_each_unpop_region(chunk->populated, rs, re,
1453 page_start, page_end) {
1454 WARN_ON(chunk->immutable);
1456 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1458 spin_lock_irqsave(&pcpu_lock, flags);
1460 pcpu_free_area(chunk, off);
1461 err = "failed to populate";
1464 pcpu_chunk_populated(chunk, rs, re, true);
1465 spin_unlock_irqrestore(&pcpu_lock, flags);
1468 mutex_unlock(&pcpu_alloc_mutex);
1471 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1472 pcpu_schedule_balance_work();
1474 /* clear the areas and return address relative to base address */
1475 for_each_possible_cpu(cpu)
1476 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1478 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1479 kmemleak_alloc_percpu(ptr, size, gfp);
1481 trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1482 chunk->base_addr, off, ptr);
1487 spin_unlock_irqrestore(&pcpu_lock, flags);
1489 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1491 if (!is_atomic && do_warn && warn_limit) {
1492 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1493 size, align, is_atomic, err);
1496 pr_info("limit reached, disable warning\n");
1499 /* see the flag handling in pcpu_balance_workfn() */
1500 pcpu_atomic_alloc_failed = true;
1501 pcpu_schedule_balance_work();
1503 mutex_unlock(&pcpu_alloc_mutex);
1509 * __alloc_percpu_gfp - allocate dynamic percpu area
1510 * @size: size of area to allocate in bytes
1511 * @align: alignment of area (max PAGE_SIZE)
1512 * @gfp: allocation flags
1514 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1515 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1516 * be called from any context but is a lot more likely to fail. If @gfp
1517 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1518 * allocation requests.
1521 * Percpu pointer to the allocated area on success, NULL on failure.
1523 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1525 return pcpu_alloc(size, align, false, gfp);
1527 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1530 * __alloc_percpu - allocate dynamic percpu area
1531 * @size: size of area to allocate in bytes
1532 * @align: alignment of area (max PAGE_SIZE)
1534 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1536 void __percpu *__alloc_percpu(size_t size, size_t align)
1538 return pcpu_alloc(size, align, false, GFP_KERNEL);
1540 EXPORT_SYMBOL_GPL(__alloc_percpu);
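/*
 * Typical use of the dynamic interface above (illustrative sketch; struct
 * foo_stats and its counter field are made-up names, error handling
 * trimmed).  alloc_percpu() is a convenience wrapper that passes the
 * type's size and alignment to __alloc_percpu():
 *
 *	struct foo_stats __percpu *stats;
 *	unsigned long long total = 0;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->count;
 *	...
 *	free_percpu(stats);
 */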
1543 * __alloc_reserved_percpu - allocate reserved percpu area
1544 * @size: size of area to allocate in bytes
1545 * @align: alignment of area (max PAGE_SIZE)
1547 * Allocate zero-filled percpu area of @size bytes aligned at @align
1548 * from reserved percpu area if arch has set it up; otherwise,
1549 * allocation is served from the same dynamic area. Might sleep.
1550 * Might trigger writeouts.
1553 * Does GFP_KERNEL allocation.
1556 * Percpu pointer to the allocated area on success, NULL on failure.
1558 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1560 return pcpu_alloc(size, align, true, GFP_KERNEL);
1564 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1567 * Reclaim all fully free chunks except for the first one. This is also
1568 * responsible for maintaining the pool of empty populated pages. However,
1569 * it is possible that this is called when physical memory is scarce causing
1570 * OOM killer to be triggered. We should avoid doing so until an actual
1571 * allocation causes the failure as it is possible that requests can be
1572 * serviced from already backed regions.
1574 static void pcpu_balance_workfn(struct work_struct *work)
1576 /* gfp flags passed to underlying allocators */
1577 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1579 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1580 struct pcpu_chunk *chunk, *next;
1581 int slot, nr_to_pop, ret;
1584 * There's no reason to keep around multiple unused chunks and VM
1585 * areas can be scarce. Destroy all free chunks except for one.
1587 mutex_lock(&pcpu_alloc_mutex);
1588 spin_lock_irq(&pcpu_lock);
1590 list_for_each_entry_safe(chunk, next, free_head, list) {
1591 WARN_ON(chunk->immutable);
1593 /* spare the first one */
1594 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1597 list_move(&chunk->list, &to_free);
1600 spin_unlock_irq(&pcpu_lock);
1602 list_for_each_entry_safe(chunk, next, &to_free, list) {
1605 pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
1607 pcpu_depopulate_chunk(chunk, rs, re);
1608 spin_lock_irq(&pcpu_lock);
1609 pcpu_chunk_depopulated(chunk, rs, re);
1610 spin_unlock_irq(&pcpu_lock);
1612 pcpu_destroy_chunk(chunk);
1616 * Ensure there are a certain number of free populated pages for
1617 * atomic allocs. Fill up from the most packed so that atomic
1618 * allocs don't increase fragmentation. If atomic allocation
1619 * failed previously, always populate the maximum amount. This
1620 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1621 * failing indefinitely; however, large atomic allocs are not
1622 * something we support properly and can be highly unreliable and
1626 if (pcpu_atomic_alloc_failed) {
1627 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1628 /* best effort anyway, don't worry about synchronization */
1629 pcpu_atomic_alloc_failed = false;
1631 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1632 pcpu_nr_empty_pop_pages,
1633 0, PCPU_EMPTY_POP_PAGES_HIGH);
1636 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1637 int nr_unpop = 0, rs, re;
1642 spin_lock_irq(&pcpu_lock);
1643 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1644 nr_unpop = chunk->nr_pages - chunk->nr_populated;
1648 spin_unlock_irq(&pcpu_lock);
1653 /* @chunk can't go away while pcpu_alloc_mutex is held */
1654 pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
1656 int nr = min(re - rs, nr_to_pop);
1658 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
1661 spin_lock_irq(&pcpu_lock);
1662 pcpu_chunk_populated(chunk, rs, rs + nr, false);
1663 spin_unlock_irq(&pcpu_lock);
1674 /* ran out of chunks to populate, create a new one and retry */
1675 chunk = pcpu_create_chunk(gfp);
1677 spin_lock_irq(&pcpu_lock);
1678 pcpu_chunk_relocate(chunk, -1);
1679 spin_unlock_irq(&pcpu_lock);
1684 mutex_unlock(&pcpu_alloc_mutex);
1688 * free_percpu - free percpu area
1689 * @ptr: pointer to area to free
1691 * Free percpu area @ptr.
1694 * Can be called from atomic context.
1696 void free_percpu(void __percpu *ptr)
1699 struct pcpu_chunk *chunk;
1700 unsigned long flags;
1706 kmemleak_free_percpu(ptr);
1708 addr = __pcpu_ptr_to_addr(ptr);
1710 spin_lock_irqsave(&pcpu_lock, flags);
1712 chunk = pcpu_chunk_addr_search(addr);
1713 off = addr - chunk->base_addr;
1715 pcpu_free_area(chunk, off);
1717 /* if there is more than one fully free chunk, wake up the grim reaper */
1718 if (chunk->free_bytes == pcpu_unit_size) {
1719 struct pcpu_chunk *pos;
1721 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1723 pcpu_schedule_balance_work();
1728 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1730 spin_unlock_irqrestore(&pcpu_lock, flags);
1732 EXPORT_SYMBOL_GPL(free_percpu);
1734 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1737 const size_t static_size = __per_cpu_end - __per_cpu_start;
1738 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1741 for_each_possible_cpu(cpu) {
1742 void *start = per_cpu_ptr(base, cpu);
1743 void *va = (void *)addr;
1745 if (va >= start && va < start + static_size) {
1747 *can_addr = (unsigned long) (va - start);
1748 *can_addr += (unsigned long)
1749 per_cpu_ptr(base, get_boot_cpu_id());
1755 /* on UP, can't distinguish from other static vars, always false */
1760 * is_kernel_percpu_address - test whether address is from static percpu area
1761 * @addr: address to test
1763 * Test whether @addr belongs to in-kernel static percpu area. Module
1764 * static percpu areas are not considered. For those, use
1765 * is_module_percpu_address().
1768 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1770 bool is_kernel_percpu_address(unsigned long addr)
1772 return __is_kernel_percpu_address(addr, NULL);
1776 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1777 * @addr: the address to be converted to physical address
1779 * Given @addr, which is a dereferenceable address obtained via one of
1780 * percpu access macros, this function translates it into its physical
1781 * address. The caller is responsible for ensuring @addr stays valid
1782 * until this function finishes.
1784 * The percpu allocator has special setup for the first chunk, which currently
1785 * supports either embedding in the linear address space or vmalloc mapping,
1786 * and, from the second chunk on, the backing allocator (currently either vm or
1787 * km) provides translation.
1789 * The addr can be translated simply without checking if it falls into the
1790 * first chunk. But the current code better reflects how the percpu allocator
1791 * actually works, and the verification can discover bugs both in the percpu
1792 * allocator itself and in per_cpu_ptr_to_phys() callers. So we keep the current code.
1796 * The physical address for @addr.
1798 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1800 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1801 bool in_first_chunk = false;
1802 unsigned long first_low, first_high;
1806 * The following test on unit_low/high isn't strictly
1807 * necessary but will speed up lookups of addresses which
1808 * aren't in the first chunk.
1810 * The address check is against full chunk sizes. pcpu_base_addr
1811 * points to the beginning of the first chunk including the
1812 * static region. Assumes good intent as the first chunk may
1813 * not be full (i.e. < pcpu_unit_pages in size).
1815 first_low = (unsigned long)pcpu_base_addr +
1816 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
1817 first_high = (unsigned long)pcpu_base_addr +
1818 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
1819 if ((unsigned long)addr >= first_low &&
1820 (unsigned long)addr < first_high) {
1821 for_each_possible_cpu(cpu) {
1822 void *start = per_cpu_ptr(base, cpu);
1824 if (addr >= start && addr < start + pcpu_unit_size) {
1825 in_first_chunk = true;
1831 if (in_first_chunk) {
1832 if (!is_vmalloc_addr(addr))
1835 return page_to_phys(vmalloc_to_page(addr)) +
1836 offset_in_page(addr);
1838 return page_to_phys(pcpu_addr_to_page(addr)) +
1839 offset_in_page(addr);
1843 * pcpu_alloc_alloc_info - allocate percpu allocation info
1844 * @nr_groups: the number of groups
1845 * @nr_units: the number of units
1847 * Allocate ai which is large enough for @nr_groups groups containing
1848 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1849 * cpu_map array which is long enough for @nr_units and filled with
1850 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1851 * pointer of other groups.
1854 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1857 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1860 struct pcpu_alloc_info *ai;
1861 size_t base_size, ai_size;
1865 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1866 __alignof__(ai->groups[0].cpu_map[0]));
1867 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1869 ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
1875 ai->groups[0].cpu_map = ptr;
1877 for (unit = 0; unit < nr_units; unit++)
1878 ai->groups[0].cpu_map[unit] = NR_CPUS;
1880 ai->nr_groups = nr_groups;
1881 ai->__ai_size = PFN_ALIGN(ai_size);
1887 * pcpu_free_alloc_info - free percpu allocation info
1888 * @ai: pcpu_alloc_info to free
1890 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1892 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1894 memblock_free_early(__pa(ai), ai->__ai_size);
1898 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1900 * @ai: allocation info to dump
1902 * Print out information about @ai using loglevel @lvl.
1904 static void pcpu_dump_alloc_info(const char *lvl,
1905 const struct pcpu_alloc_info *ai)
1907 int group_width = 1, cpu_width = 1, width;
1908 char empty_str[] = "--------";
1909 int alloc = 0, alloc_end = 0;
1911 int upa, apl; /* units per alloc, allocs per line */
1917 v = num_possible_cpus();
1920 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1922 upa = ai->alloc_size / ai->unit_size;
1923 width = upa * (cpu_width + 1) + group_width + 3;
1924 apl = rounddown_pow_of_two(max(60 / width, 1));
1926 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1927 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1928 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1930 for (group = 0; group < ai->nr_groups; group++) {
1931 const struct pcpu_group_info *gi = &ai->groups[group];
1932 int unit = 0, unit_end = 0;
1934 BUG_ON(gi->nr_units % upa);
1935 for (alloc_end += gi->nr_units / upa;
1936 alloc < alloc_end; alloc++) {
1937 if (!(alloc % apl)) {
1939 printk("%spcpu-alloc: ", lvl);
1941 pr_cont("[%0*d] ", group_width, group);
1943 for (unit_end += upa; unit < unit_end; unit++)
1944 if (gi->cpu_map[unit] != NR_CPUS)
1946 cpu_width, gi->cpu_map[unit]);
1948 pr_cont("%s ", empty_str);
1955 * pcpu_setup_first_chunk - initialize the first percpu chunk
1956 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1957 * @base_addr: mapped address
1959 * Initialize the first percpu chunk which contains the kernel static
1960 * percpu area. This function is to be called from arch percpu area
1963 * @ai contains all information necessary to initialize the first
1964 * chunk and prime the dynamic percpu allocator.
1966 * @ai->static_size is the size of static percpu area.
1968 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1969 * reserve after the static area in the first chunk. This reserves
1970 * the first chunk such that it's available only through reserved
1971 * percpu allocation. This is primarily used to serve module percpu
1972 * static areas on architectures where the addressing model has
1973 * limited offset range for symbol relocations to guarantee module
1974 * percpu symbols fall inside the relocatable range.
1976 * @ai->dyn_size determines the number of bytes available for dynamic
1977 * allocation in the first chunk. The area between @ai->static_size +
1978 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1980 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1981 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1984 * @ai->atom_size is the allocation atom size and used as alignment
1987 * @ai->alloc_size is the allocation size and always multiple of
1988 * @ai->atom_size. This is larger than @ai->atom_size if
1989 * @ai->unit_size is larger than @ai->atom_size.
1991 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1992 * percpu areas. Units which should be colocated are put into the
1993 * same group. Dynamic VM areas will be allocated according to these
1994 * groupings. If @ai->nr_groups is zero, a single group containing
1995 * all units is assumed.
1997 * The caller should have mapped the first chunk at @base_addr and
1998 * copied static data to each unit.
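 *
 * As an illustrative example, a layout with a 44k static area, an 8k
 * reserved area and a 28k dynamic area needs @ai->unit_size of at least
 * 80k; the unit size must also be a PAGE_SIZE multiple and no smaller
 * than PCPU_MIN_UNIT_SIZE.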
2000 * The first chunk will always contain a static and a dynamic region.
2001 * However, the static region is not managed by any chunk. If the first
2002 * chunk also contains a reserved region, it is served by two chunks -
2003 * one for the reserved region and one for the dynamic region. They
2004 * share the same vm, but use offset regions in the area allocation map.
2005 * The chunk serving the dynamic region is circulated in the chunk slots
2006 * and available for dynamic allocation like any other chunk.
2009 * 0 on success, -errno on failure.
2011 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2014 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2015 size_t static_size, dyn_size;
2016 struct pcpu_chunk *chunk;
2017 unsigned long *group_offsets;
2018 size_t *group_sizes;
2019 unsigned long *unit_off;
2024 unsigned long tmp_addr;
2026 #define PCPU_SETUP_BUG_ON(cond) do { \
2027 if (unlikely(cond)) { \
2028 pr_emerg("failed to initialize, %s\n", #cond); \
2029 pr_emerg("cpu_possible_mask=%*pb\n", \
2030 cpumask_pr_args(cpu_possible_mask)); \
2031 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2037 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2039 PCPU_SETUP_BUG_ON(!ai->static_size);
2040 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2042 PCPU_SETUP_BUG_ON(!base_addr);
2043 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2044 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2045 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2046 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2047 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2048 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2049 PCPU_SETUP_BUG_ON(!ai->dyn_size);
2050 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2051 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2052 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2053 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2055 /* process group information and build config tables accordingly */
2056 group_offsets = memblock_virt_alloc(ai->nr_groups *
2057 sizeof(group_offsets[0]), 0);
2058 group_sizes = memblock_virt_alloc(ai->nr_groups *
2059 sizeof(group_sizes[0]), 0);
2060 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
2061 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
2063 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2064 unit_map[cpu] = UINT_MAX;
2066 pcpu_low_unit_cpu = NR_CPUS;
2067 pcpu_high_unit_cpu = NR_CPUS;
2069 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2070 const struct pcpu_group_info *gi = &ai->groups[group];
2072 group_offsets[group] = gi->base_offset;
2073 group_sizes[group] = gi->nr_units * ai->unit_size;
2075 for (i = 0; i < gi->nr_units; i++) {
2076 cpu = gi->cpu_map[i];
2080 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2081 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2082 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2084 unit_map[cpu] = unit + i;
2085 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2087 /* determine low/high unit_cpu */
2088 if (pcpu_low_unit_cpu == NR_CPUS ||
2089 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2090 pcpu_low_unit_cpu = cpu;
2091 if (pcpu_high_unit_cpu == NR_CPUS ||
2092 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2093 pcpu_high_unit_cpu = cpu;
2096 pcpu_nr_units = unit;
2098 for_each_possible_cpu(cpu)
2099 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2101 /* we're done parsing the input, undefine BUG macro and dump config */
2102 #undef PCPU_SETUP_BUG_ON
2103 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2105 pcpu_nr_groups = ai->nr_groups;
2106 pcpu_group_offsets = group_offsets;
2107 pcpu_group_sizes = group_sizes;
2108 pcpu_unit_map = unit_map;
2109 pcpu_unit_offsets = unit_off;
2111 /* determine basic parameters */
2112 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2113 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2114 pcpu_atom_size = ai->atom_size;
2115 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2116 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2118 pcpu_stats_save_ai(ai);
2121 * Allocate chunk slots. The additional last slot is for
2124 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2125 pcpu_slot = memblock_virt_alloc(
2126 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
2127 for (i = 0; i < pcpu_nr_slots; i++)
2128 INIT_LIST_HEAD(&pcpu_slot[i]);
2131 * The end of the static region needs to be aligned with the
2132 * minimum allocation size as this offsets the reserved and
2133 * dynamic region. The first chunk ends page aligned by
2134 * expanding the dynamic region, therefore the dynamic region
2135 * can be shrunk to compensate while still staying above the
2138 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2139 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2142 * Initialize first chunk.
2143 * If the reserved_size is non-zero, this initializes the reserved
2144 * chunk. If the reserved_size is zero, the reserved chunk is NULL
2145 * and the dynamic region is initialized here. The first chunk,
2146 * pcpu_first_chunk, will always point to the chunk that serves
2147 * the dynamic region.
2149 tmp_addr = (unsigned long)base_addr + static_size;
2150 map_size = ai->reserved_size ?: dyn_size;
2151 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2153 /* init dynamic chunk if necessary */
2154 if (ai->reserved_size) {
2155 pcpu_reserved_chunk = chunk;
2157 tmp_addr = (unsigned long)base_addr + static_size +
2159 map_size = dyn_size;
2160 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2163 /* link the first chunk in */
2164 pcpu_first_chunk = chunk;
2165 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2166 pcpu_chunk_relocate(pcpu_first_chunk, -1);
2168 pcpu_stats_chunk_alloc();
2169 trace_percpu_create_chunk(base_addr);
2172 pcpu_base_addr = base_addr;
2178 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2179 [PCPU_FC_AUTO] = "auto",
2180 [PCPU_FC_EMBED] = "embed",
2181 [PCPU_FC_PAGE] = "page",
2184 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2186 static int __init percpu_alloc_setup(char *str)
2193 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2194 else if (!strcmp(str, "embed"))
2195 pcpu_chosen_fc = PCPU_FC_EMBED;
2197 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2198 else if (!strcmp(str, "page"))
2199 pcpu_chosen_fc = PCPU_FC_PAGE;
2202 pr_warn("unknown allocator %s specified\n", str);
2206 early_param("percpu_alloc", percpu_alloc_setup);
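/*
 * For example, booting with "percpu_alloc=embed" or "percpu_alloc=page"
 * selects the corresponding first chunk allocator when the arch config
 * provides it; unknown values are warned about and the default "auto"
 * selection is kept.
 */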
2209 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2210 * Build it if needed by the arch config or the generic setup is going
2213 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2214 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2215 #define BUILD_EMBED_FIRST_CHUNK
2218 /* build pcpu_page_first_chunk() iff needed by the arch config */
2219 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2220 #define BUILD_PAGE_FIRST_CHUNK
2223 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2224 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2226 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2227 * @reserved_size: the size of reserved percpu area in bytes
2228 * @dyn_size: minimum free size for dynamic allocation in bytes
2229 * @atom_size: allocation atom size
2230 * @cpu_distance_fn: callback to determine distance between cpus, optional
2232 * This function determines grouping of units, their mappings to cpus
2233 * and other parameters considering needed percpu size, allocation
2234 * atom size and distances between CPUs.
2236 * Groups are always multiples of atom size and CPUs which are of
2237 * LOCAL_DISTANCE both ways are grouped together and share space for
2238 * units in the same group. The returned configuration is guaranteed
2239 * to have CPUs on different nodes in different groups and >=75% usage
2240 * of allocated virtual address space.
2243 * On success, pointer to the new allocation_info is returned. On
2244 * failure, ERR_PTR value is returned.
2246 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2247 size_t reserved_size, size_t dyn_size,
2249 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2251 static int group_map[NR_CPUS] __initdata;
2252 static int group_cnt[NR_CPUS] __initdata;
2253 const size_t static_size = __per_cpu_end - __per_cpu_start;
2254 int nr_groups = 1, nr_units = 0;
2255 size_t size_sum, min_unit_size, alloc_size;
2256 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
2257 int last_allocs, group, unit;
2258 unsigned int cpu, tcpu;
2259 struct pcpu_alloc_info *ai;
2260 unsigned int *cpu_map;
2262 /* this function may be called multiple times */
2263 memset(group_map, 0, sizeof(group_map));
2264 memset(group_cnt, 0, sizeof(group_cnt));
2266 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2267 size_sum = PFN_ALIGN(static_size + reserved_size +
2268 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2269 dyn_size = size_sum - static_size - reserved_size;
2272 * Determine min_unit_size, alloc_size and max_upa such that
2273 * alloc_size is multiple of atom_size and is the smallest
2274 * which can accommodate 4k aligned segments which are equal to
2275 * or larger than min_unit_size.
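 *
 * For example (illustrative, assuming 4k pages): a min_unit_size of 192k
 * with a 2M atom_size gives alloc_size = 2M; upa starts at 10 and is
 * walked down to 8, the first value for which 2M divides evenly into
 * page-aligned 256k units.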
2277 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2279 /* determine the maximum # of units that can fit in an allocation */
2280 alloc_size = roundup(min_unit_size, atom_size);
2281 upa = alloc_size / min_unit_size;
2282 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2286 /* group cpus according to their proximity */
2287 for_each_possible_cpu(cpu) {
2290 for_each_possible_cpu(tcpu) {
2293 if (group_map[tcpu] == group && cpu_distance_fn &&
2294 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2295 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2297 nr_groups = max(nr_groups, group + 1);
2301 group_map[cpu] = group;
2306 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2307 * Expand the unit_size until we use >= 75% of the units allocated.
2308 * This is related to atom_size, which can be much larger than the unit_size.
2310 last_allocs = INT_MAX;
2311 for (upa = max_upa; upa; upa--) {
2312 int allocs = 0, wasted = 0;
2314 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2317 for (group = 0; group < nr_groups; group++) {
2318 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2319 allocs += this_allocs;
2320 wasted += this_allocs * upa - group_cnt[group];
2324 * Don't accept if wastage is over 1/3. The
2325 * greater-than comparison ensures upa==1 always
2326 * passes the following check.
2328 if (wasted > num_possible_cpus() / 3)
2331 /* and then don't consume more memory */
2332 if (allocs > last_allocs)
2334 last_allocs = allocs;
2339 /* allocate and fill alloc_info */
2340 for (group = 0; group < nr_groups; group++)
2341 nr_units += roundup(group_cnt[group], upa);
2343 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2345 return ERR_PTR(-ENOMEM);
2346 cpu_map = ai->groups[0].cpu_map;
2348 for (group = 0; group < nr_groups; group++) {
2349 ai->groups[group].cpu_map = cpu_map;
2350 cpu_map += roundup(group_cnt[group], upa);
2353 ai->static_size = static_size;
2354 ai->reserved_size = reserved_size;
2355 ai->dyn_size = dyn_size;
2356 ai->unit_size = alloc_size / upa;
2357 ai->atom_size = atom_size;
2358 ai->alloc_size = alloc_size;
2360 for (group = 0, unit = 0; group_cnt[group]; group++) {
2361 struct pcpu_group_info *gi = &ai->groups[group];
2364 * Initialize base_offset as if all groups are located
2365 * back-to-back. The caller should update this to
2366 * reflect actual allocation.
2368 gi->base_offset = unit * ai->unit_size;
2370 for_each_possible_cpu(cpu)
2371 if (group_map[cpu] == group)
2372 gi->cpu_map[gi->nr_units++] = cpu;
2373 gi->nr_units = roundup(gi->nr_units, upa);
2374 unit += gi->nr_units;
2376 BUG_ON(unit != nr_units);
2380 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2382 #if defined(BUILD_EMBED_FIRST_CHUNK)
2384 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2385 * @reserved_size: the size of reserved percpu area in bytes
2386 * @dyn_size: minimum free size for dynamic allocation in bytes
2387 * @atom_size: allocation atom size
2388 * @cpu_distance_fn: callback to determine distance between cpus, optional
2389 * @alloc_fn: function to allocate percpu page
2390 * @free_fn: function to free percpu page
2392 * This is a helper to ease setting up embedded first percpu chunk and
2393 * can be called where pcpu_setup_first_chunk() is expected.
2395 * If this function is used to setup the first chunk, it is allocated
2396 * by calling @alloc_fn and used as-is without being mapped into
2397 * vmalloc area. Allocations are always whole multiples of @atom_size
2398 * aligned to @atom_size.
2400 * This enables the first chunk to piggy back on the linear physical
2401 * mapping which often uses larger page size. Please note that this
2402 * can result in very sparse cpu->unit mapping on NUMA machines thus
2403 * requiring large vmalloc address space. Don't use this allocator if
2404 * vmalloc space is not orders of magnitude larger than distances
2405 * between node memory addresses (ie. 32bit NUMA machines).
2407 * @dyn_size specifies the minimum dynamic area size.
2409 * If the needed size is smaller than the minimum or specified unit
2410 * size, the leftover is returned using @free_fn.
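 *
 * An arch override might invoke this roughly as follows (illustrative;
 * the distance and bootmem helpers are hypothetical):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_alloc_bootmem,
 *				    my_free_bootmem);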
2413 * 0 on success, -errno on failure.
2415 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2417 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2418 pcpu_fc_alloc_fn_t alloc_fn,
2419 pcpu_fc_free_fn_t free_fn)
2421 void *base = (void *)ULONG_MAX;
2422 void **areas = NULL;
2423 struct pcpu_alloc_info *ai;
2424 size_t size_sum, areas_size;
2425 unsigned long max_distance;
2426 int group, i, highest_group, rc;
2428 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2433 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2434 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2436 areas = memblock_virt_alloc_nopanic(areas_size, 0);
2442 /* allocate, copy and determine base address & max_distance */
2444 for (group = 0; group < ai->nr_groups; group++) {
2445 struct pcpu_group_info *gi = &ai->groups[group];
2446 unsigned int cpu = NR_CPUS;
2449 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2450 cpu = gi->cpu_map[i];
2451 BUG_ON(cpu == NR_CPUS);
2453 /* allocate space for the whole group */
2454 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2457 goto out_free_areas;
2459 /* kmemleak tracks the percpu allocations separately */
2463 base = min(ptr, base);
2464 if (ptr > areas[highest_group])
2465 highest_group = group;
2467 max_distance = areas[highest_group] - base;
2468 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2470 /* warn if maximum distance is further than 75% of vmalloc space */
2471 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2472 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2473 max_distance, VMALLOC_TOTAL);
2474 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2475 /* and fail if we have fallback */
2477 goto out_free_areas;
2482 * Copy data and free unused parts. This should happen after all
2483 * allocations are complete; otherwise, we may end up with
2484 * overlapping groups.
2486 for (group = 0; group < ai->nr_groups; group++) {
2487 struct pcpu_group_info *gi = &ai->groups[group];
2488 void *ptr = areas[group];
2490 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2491 if (gi->cpu_map[i] == NR_CPUS) {
2492 /* unused unit, free whole */
2493 free_fn(ptr, ai->unit_size);
2496 /* copy and return the unused part */
2497 memcpy(ptr, __per_cpu_load, ai->static_size);
2498 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2502 /* base address is now known, determine group base offsets */
2503 for (group = 0; group < ai->nr_groups; group++) {
2504 ai->groups[group].base_offset = areas[group] - base;
2507 pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2508 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2509 ai->dyn_size, ai->unit_size);
2511 rc = pcpu_setup_first_chunk(ai, base);
2515 for (group = 0; group < ai->nr_groups; group++)
2517 free_fn(areas[group],
2518 ai->groups[group].nr_units * ai->unit_size);
2520 pcpu_free_alloc_info(ai);
2522 memblock_free_early(__pa(areas), areas_size);
2525 #endif /* BUILD_EMBED_FIRST_CHUNK */
2527 #ifdef BUILD_PAGE_FIRST_CHUNK
2529 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2530 * @reserved_size: the size of reserved percpu area in bytes
2531 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2532 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2533 * @populate_pte_fn: function to populate pte
2535 * This is a helper to ease setting up page-remapped first percpu
2536 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2538 * This is the basic allocator. Static percpu area is allocated
2539 * page-by-page into vmalloc area.
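 *
 * An arch selecting this path might call it roughly as follows
 * (illustrative; the page and pte helpers are hypothetical):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_alloc_page,
 *				   my_free_page, my_populate_pte);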
2542 * 0 on success, -errno on failure.
2544 int __init pcpu_page_first_chunk(size_t reserved_size,
2545 pcpu_fc_alloc_fn_t alloc_fn,
2546 pcpu_fc_free_fn_t free_fn,
2547 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2549 static struct vm_struct vm;
2550 struct pcpu_alloc_info *ai;
2554 struct page **pages;
2559 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2561 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2564 BUG_ON(ai->nr_groups != 1);
2565 upa = ai->alloc_size/ai->unit_size;
2566 nr_g0_units = roundup(num_possible_cpus(), upa);
2567 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2568 pcpu_free_alloc_info(ai);
2572 unit_pages = ai->unit_size >> PAGE_SHIFT;
2574 /* unaligned allocations can't be freed, round up to page size */
2575 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2577 pages = memblock_virt_alloc(pages_size, 0);
2579 /* allocate pages */
2581 for (unit = 0; unit < num_possible_cpus(); unit++) {
2582 unsigned int cpu = ai->groups[0].cpu_map[unit];
2583 for (i = 0; i < unit_pages; i++) {
2586 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2588 pr_warn("failed to allocate %s page for cpu%u\n",
2592 /* kmemleak tracks the percpu allocations separately */
2594 pages[j++] = virt_to_page(ptr);
2598 /* allocate vm area, map the pages and copy static data */
2599 vm.flags = VM_ALLOC;
2600 vm.size = num_possible_cpus() * ai->unit_size;
2601 vm_area_register_early(&vm, PAGE_SIZE);
2603 for (unit = 0; unit < num_possible_cpus(); unit++) {
2604 unsigned long unit_addr =
2605 (unsigned long)vm.addr + unit * ai->unit_size;
2607 for (i = 0; i < unit_pages; i++)
2608 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2610 /* pte already populated, the following shouldn't fail */
2611 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2614 panic("failed to map percpu area, err=%d\n", rc);
2617 * FIXME: Archs with virtual cache should flush local
2618 * cache for the linear mapping here - something
2619 * equivalent to flush_cache_vmap() on the local cpu.
2620 * flush_cache_vmap() can't be used as most supporting
2621 * data structures are not set up yet.
2624 /* copy static data */
2625 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2628 /* we're ready, commit */
2629 pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2630 unit_pages, psize_str, vm.addr, ai->static_size,
2631 ai->reserved_size, ai->dyn_size);
2633 rc = pcpu_setup_first_chunk(ai, vm.addr);
2638 free_fn(page_address(pages[j]), PAGE_SIZE);
2641 memblock_free_early(__pa(pages), pages_size);
2642 pcpu_free_alloc_info(ai);
2645 #endif /* BUILD_PAGE_FIRST_CHUNK */
2647 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2649 * Generic SMP percpu area setup.
2651 * The embedding helper is used because its behavior closely resembles
2652 * the original non-dynamic generic percpu area setup. This is
2653 * important because many archs have addressing restrictions and might
2654 * fail if the percpu area is located far away from the previous
2655 * location. As an added bonus, in non-NUMA cases, embedding is
2656 * generally a good idea TLB-wise because percpu area can piggy back
2657 * on the physical linear memory mapping which uses large page
2658 * mappings on applicable archs.
2660 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2661 EXPORT_SYMBOL(__per_cpu_offset);
2663 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2666 return memblock_virt_alloc_from_nopanic(
2667 size, align, __pa(MAX_DMA_ADDRESS));
2670 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2672 memblock_free_early(__pa(ptr), size);
2675 void __init setup_per_cpu_areas(void)
2677 unsigned long delta;
2682 * Always reserve area for module percpu variables. That's
2683 * what the legacy allocator did.
2685 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2686 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2687 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2689 panic("Failed to initialize percpu areas.");
2691 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2692 for_each_possible_cpu(cpu)
2693 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
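/*
 * With the offsets above in place, per-cpu accessors can resolve a static
 * percpu variable roughly as (illustrative):
 *
 *	ptr = (void *)&var + __per_cpu_offset[cpu];
 */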
2695 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2697 #else /* CONFIG_SMP */
2700 * UP percpu area setup.
2702 * UP always uses km-based percpu allocator with identity mapping.
2703 * Static percpu variables are indistinguishable from the usual static
2704 * variables and don't require any special preparation.
2706 void __init setup_per_cpu_areas(void)
2708 const size_t unit_size =
2709 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2710 PERCPU_DYNAMIC_RESERVE));
2711 struct pcpu_alloc_info *ai;
2714 ai = pcpu_alloc_alloc_info(1, 1);
2715 fc = memblock_virt_alloc_from_nopanic(unit_size,
2717 __pa(MAX_DMA_ADDRESS));
2719 panic("Failed to allocate memory for percpu areas.");
2720 /* kmemleak tracks the percpu allocations separately */
2723 ai->dyn_size = unit_size;
2724 ai->unit_size = unit_size;
2725 ai->atom_size = unit_size;
2726 ai->alloc_size = unit_size;
2727 ai->groups[0].nr_units = 1;
2728 ai->groups[0].cpu_map[0] = 0;
2730 if (pcpu_setup_first_chunk(ai, fc) < 0)
2731 panic("Failed to initialize percpu areas.");
2735 pcpu_free_alloc_info(ai);
2739 #endif /* CONFIG_SMP */
2742 * Percpu allocator is initialized early during boot when neither slab nor
2743 * workqueue is available. Plug async management until everything is up
2746 static int __init percpu_enable_async(void)
2748 pcpu_async_enabled = true;
2751 subsys_initcall(percpu_enable_async);