#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;
/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};
extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif
#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
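/*
 * Example (a sketch with hypothetical addresses and variables): a typical
 * arch boot path feeds firmware-discovered RAM into memblock and then
 * carves out ranges that must never reach the page allocator.
 *
 *	memblock_add(0x80000000, SZ_512M);		// one RAM bank
 *	memblock_reserve(initrd_start, initrd_size);	// keep the initrd safe
 *	memblock_mark_nomap(fw_base, fw_size);		// firmware-owned, no direct map
 */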
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
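/*
 * Usage sketch: walk memory that is not reserved, across all nodes. This
 * is essentially what the for_each_free_mem_range() wrapper below
 * expands to:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */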
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))
/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
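/*
 * Usage sketch: dump every reserved range, e.g. when debugging an early
 * allocation failure:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */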
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
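/*
 * Usage sketch: note the loop variable is a plain int here, and the node
 * selector takes %MAX_NUMNODES (not %NUMA_NO_NODE) to mean "all nodes":
 *
 *	int i, nid;
 *	unsigned long start_pfn, end_pfn;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn %lu..%lu\n", nid, start_pfn, end_pfn);
 */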
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
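/*
 * Usage sketch: total up the memory still available to the early
 * allocator:
 *
 *	u64 i;
 *	phys_addr_t start, end, free = 0;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */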
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)
static inline void memblock_set_region_flags(struct memblock_region *r,
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);

void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
				     phys_addr_t min_addr, phys_addr_t max_addr,
				     int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);
static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
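/*
 * Usage sketch ("my_table" is a hypothetical type): early, pre-buddy
 * allocation. This variant zeroes the memory and panics on failure, so
 * the caller does not check for NULL:
 *
 *	struct my_table *tbl = memblock_alloc(sizeof(*tbl), SMP_CACHE_BYTES);
 */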
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
						   phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}
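/*
 * Usage sketch: unlike memblock_alloc(), the _nopanic variant returns
 * NULL on failure, so it suits optional buffers:
 *
 *	void *buf = memblock_alloc_nopanic(size, SMP_CACHE_BYTES);
 *
 *	if (!buf)
 *		pr_warn("optional buffer not available, continuing\n");
 */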
static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
							phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      ARCH_LOW_ADDRESS_LIMIT,
					      NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
							phys_addr_t align,
							phys_addr_t min_addr)
{
	return memblock_alloc_try_nid_nopanic(size, align, min_addr,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
							int nid)
{
	return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
					      MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
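/*
 * Usage sketch: code that wants a few early allocations placed at low
 * addresses (e.g. near the kernel image) can flip the direction
 * temporarily:
 *
 *	memblock_set_bottom_up(true);
 *	ptr = memblock_alloc(SZ_4K, SZ_4K);
 *	memblock_set_bottom_up(false);
 */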
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
				    phys_addr_t align, phys_addr_t max_addr,
				    int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
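/*
 * Note: memblock_debug is normally switched on by booting with
 * "memblock=debug" on the kernel command line; memblock_dump_all() and
 * memblock_dbg() are silent otherwise.
 */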
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */
/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
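/*
 * Worked example, assuming 4K pages: a reserved region with base 0x1234
 * and size 0x2000 covers bytes 0x1234..0x3233. The reserved accessors
 * round outward to whole pages: PFN_DOWN(0x1234) = 1 and
 * PFN_UP(0x3234) = 4, i.e. pfns 1..3 contain reserved bytes. The memory
 * accessors round inward instead (PFN_UP on base, PFN_DOWN on end), so a
 * partially covered page is never reported as usable memory.
 */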
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
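/*
 * Usage sketch: walk every registered memory region, skipping the ones
 * excluded from the direct mapping:
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg) {
 *		if (memblock_is_nomap(reg))
 *			continue;
 *		// map [reg->base, reg->base + reg->size)
 *	}
 */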
#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);
#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
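/*
 * Usage sketch, roughly following how fs/inode.c sizes its hash table
 * (names and values here are illustrative):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *						  sizeof(struct hlist_head),
 *						  ihash_entries,
 *						  14,		// scale
 *						  HASH_ZERO,
 *						  &i_hash_shift,
 *						  &i_hash_mask,
 *						  0, 0);
 */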
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */