// SPDX-License-Identifier: GPL-2.0
/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 */
/*
 * isspace() in linux/ctype.h is expected by next_args() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h,
 * since isdigit() is implemented in both of them; hence disable the
 * boot/ version here.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * Both lib/ctype.c and lib/cmdline.c pull in EXPORT_SYMBOL, which is
 * meaningless here and will cause compile errors in some cases. So do
 * not include linux/export.h and define EXPORT_SYMBOL(sym) as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)
#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/efi.h>
#include <generated/utsrelease.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>
#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled;
unsigned int pgdir_shift __ro_after_init = 39;
unsigned int ptrs_per_p4d __ro_after_init = 1;
#endif
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
static unsigned long rotate_xor(unsigned long hash, const void *area,
                                size_t size)
{
        size_t i;
        unsigned long *ptr = (unsigned long *)area;

        for (i = 0; i < size / sizeof(hash); i++) {
                /* Rotate by odd number of bits and XOR. */
                hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
                hash ^= ptr[i];
        }

        return hash;
}
/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
        unsigned long hash = 0;

        hash = rotate_xor(hash, build_str, sizeof(build_str));
        hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

        return hash;
}
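
/*
 * Illustrative note (not from the original file): on a 64-bit build,
 * each rotate_xor() step is equivalent to
 *
 *        hash = rol64(hash, 57) ^ ptr[i];
 *
 * i.e. a left-rotate by an odd bit count (64 - 7) followed by an XOR
 * with the next word of the buffer, so both the build string and the
 * boot_params contents perturb every bit of the seed after a few
 * iterations.
 */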
#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"
struct mem_vector {
        unsigned long long start;
        unsigned long long size;
};
/* Only supporting at most 4 unusable memmap regions with KASLR */
#define MAX_MEMMAP_REGIONS      4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
static unsigned long long mem_limit = ULLONG_MAX;
enum mem_avoid_index {
        MEM_AVOID_ZO_RANGE = 0,
        MEM_AVOID_INITRD,
        MEM_AVOID_CMDLINE,
        MEM_AVOID_BOOTPARAMS,
        MEM_AVOID_MEMMAP_BEGIN,
        MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
        MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
        /* Item one is entirely before item two. */
        if (one->start + one->size <= two->start)
                return false;
        /* Item one is entirely after item two. */
        if (one->start >= two->start + two->size)
                return false;

        return true;
}
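
/*
 * Worked example (illustrative, not from the original file): ranges are
 * half-open, so one = [0x100, 0x200) and two = [0x1c0, 0x300) overlap
 * (neither test above fires), while one = [0x100, 0x200) and
 * two = [0x200, 0x300) do not, because one ends exactly where two begins.
 */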
char *skip_spaces(const char *str)
{
        while (isspace(*str))
                ++str;
        return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
        char *oldp;

        if (!p)
                return -EINVAL;

        /* We don't care about this option here */
        if (!strncmp(p, "exactmap", 8))
                return -EINVAL;

        oldp = p;
        *size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        switch (*p) {
        case '#':
        case '$':
        case '!':
                *start = memparse(p + 1, &p);
                return 0;
        case '@':
                /* memmap=nn@ss specifies usable region, should be skipped */
                *size = 0;
                /* Fall through */
        default:
                /*
                 * If no offset is given and only a size is specified,
                 * memmap=nn[KMG] behaves like mem=nn[KMG]: it limits the
                 * maximum address the system can use, and regions above
                 * the limit must be avoided.
                 */
                *start = 0;
                return 0;
        }
}
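
/*
 * Illustrative examples of the accepted forms (assumed to match the
 * memmap= syntax in Documentation/admin-guide/kernel-parameters.txt):
 *
 *        memmap=512M$0x40000000  -> *start = 1G, *size = 512M (avoid it)
 *        memmap=64M@0x10000000   -> usable region, *size forced to 0
 *        memmap=1G               -> no offset: treated like mem=1G, i.e.
 *                                   a cap on the highest usable address
 */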
static void mem_avoid_memmap(char *str)
{
        static int i;

        if (i >= MAX_MEMMAP_REGIONS)
                return;

        while (str && (i < MAX_MEMMAP_REGIONS)) {
                int rc;
                unsigned long long start, size;
                char *k = strchr(str, ',');

                if (k)
                        *k++ = 0;

                rc = parse_memmap(str, &start, &size);
                if (rc < 0)
                        break;
                str = k;

                if (start == 0) {
                        /* Store the specified memory limit if size > 0 */
                        if (size > 0)
                                mem_limit = size;

                        continue;
                }

                mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
                mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
                i++;
        }

        /* More than 4 memmaps, fail KASLR */
        if ((i >= MAX_MEMMAP_REGIONS) && str)
                memmap_too_large = true;
}
/* Store the number of 1GB huge pages which users specified: */
static unsigned long max_gb_huge_pages;

static void parse_gb_huge_pages(char *param, char *val)
{
        static bool gbpage_sz;
        char *p;

        if (!strcmp(param, "hugepagesz")) {
                p = val;
                if (memparse(p, &p) != PUD_SIZE) {
                        gbpage_sz = false;
                        return;
                }

                if (gbpage_sz)
                        warn("Repeatedly set hugeTLB page size of 1G!\n");
                gbpage_sz = true;
                return;
        }

        if (!strcmp(param, "hugepages") && gbpage_sz) {
                p = val;
                max_gb_huge_pages = simple_strtoull(p, &p, 0);
                return;
        }
}
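
/*
 * Illustrative example (not from the original file): a command line of
 * "hugepagesz=1G hugepages=4" is seen as two param/val pairs. The first
 * sets gbpage_sz because memparse("1G", ...) == PUD_SIZE; the second
 * then records max_gb_huge_pages = 4, so up to four 1GB-aligned holes
 * are later kept free by process_gb_huge_pages().
 */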
static int handle_mem_options(void)
{
        char *args = (char *)get_cmd_line_ptr();
        size_t len = strlen((char *)args);
        char *tmp_cmdline;
        char *param, *val;
        u64 mem_size;

        if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
            !strstr(args, "hugepages"))
                return 0;

        tmp_cmdline = malloc(len + 1);
        if (!tmp_cmdline)
                error("Failed to allocate space for tmp_cmdline");

        memcpy(tmp_cmdline, args, len);
        tmp_cmdline[len] = 0;
        args = tmp_cmdline;

        /* Chew leading spaces */
        args = skip_spaces(args);

        while (*args) {
                args = next_arg(args, &param, &val);
                /* Stop at -- */
                if (!val && strcmp(param, "--") == 0) {
                        warn("Only '--' specified in cmdline");
                        goto out;
                }

                if (!strcmp(param, "memmap")) {
                        mem_avoid_memmap(val);
                } else if (strstr(param, "hugepages")) {
                        parse_gb_huge_pages(param, val);
                } else if (!strcmp(param, "mem")) {
                        char *p = val;

                        if (!strcmp(p, "nopentium"))
                                continue;
                        mem_size = memparse(p, &p);
                        if (mem_size == 0)
                                goto out;

                        mem_limit = mem_size;
                }
        }

out:
        free(tmp_cmdline);
        return 0;
}
/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is not obvious how to avoid is the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |   |                 |                |                   |
 * |   |                 |                |                   |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range covered
 * backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
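
/*
 * Concrete example with made-up numbers (not from the original file):
 * suppose output = 16M, init_size = 32M, input = 44M and input_size = 4M,
 * so that input + input_size == output + init_size == 48M. Then
 * MEM_AVOID_ZO_RANGE is [44M, 48M): start = input = 44M and
 * size = (output + init_size) - input = 4M.
 */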
static void mem_avoid_init(unsigned long input, unsigned long input_size,
                           unsigned long output)
{
        unsigned long init_size = boot_params->hdr.init_size;
        u64 initrd_start, initrd_size;
        u64 cmd_line, cmd_line_size;
        char *ptr;

        /*
         * Avoid the region that is unsafe to overlap during
         * decompression.
         */
        mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
        mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
        add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
                         mem_avoid[MEM_AVOID_ZO_RANGE].size);

        /* Avoid initrd. */
        initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
        initrd_start |= boot_params->hdr.ramdisk_image;
        initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
        initrd_size |= boot_params->hdr.ramdisk_size;
        mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
        mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
        /* No need to set mapping for initrd, it will be handled in VO. */

        /* Avoid kernel command line. */
        cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
        cmd_line |= boot_params->hdr.cmd_line_ptr;
        /* Calculate size of cmd_line. */
        ptr = (char *)(unsigned long)cmd_line;
        for (cmd_line_size = 0; ptr[cmd_line_size++];)
                ;
        mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
        mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
        add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
                         mem_avoid[MEM_AVOID_CMDLINE].size);

        /* Avoid boot parameters. */
        mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
        mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
        add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
                         mem_avoid[MEM_AVOID_BOOTPARAMS].size);

        /* We don't need to set a mapping for setup_data. */

        /* Mark the memmap regions we need to avoid */
        handle_mem_options();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
        /* Make sure video RAM can be used. */
        add_identity_map(0, PMD_SIZE);
#endif
}
/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
                              struct mem_vector *overlap)
{
        int i;
        struct setup_data *ptr;
        unsigned long earliest = img->start + img->size;
        bool is_overlapping = false;

        for (i = 0; i < MEM_AVOID_MAX; i++) {
                if (mem_overlaps(img, &mem_avoid[i]) &&
                    mem_avoid[i].start < earliest) {
                        *overlap = mem_avoid[i];
                        earliest = overlap->start;
                        is_overlapping = true;
                }
        }

        /* Avoid all entries in the setup_data linked list. */
        ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
        while (ptr) {
                struct mem_vector avoid;

                avoid.start = (unsigned long)ptr;
                avoid.size = sizeof(*ptr) + ptr->len;

                if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
                        *overlap = avoid;
                        earliest = overlap->start;
                        is_overlapping = true;
                }

                ptr = (struct setup_data *)(unsigned long)ptr->next;
        }

        return is_overlapping;
}
struct slot_area {
        unsigned long addr;
        int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
        struct slot_area slot_area;

        if (slot_area_index == MAX_SLOT_AREA)
                return;

        slot_area.addr = region->start;
        slot_area.num = (region->size - image_size) /
                        CONFIG_PHYSICAL_ALIGN + 1;

        if (slot_area.num > 0) {
                slot_areas[slot_area_index++] = slot_area;
                slot_max += slot_area.num;
        }
}
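
/*
 * Worked example (illustrative, not from the original file): with
 * CONFIG_PHYSICAL_ALIGN = 2M, a region starting at 100M with size 16M
 * and image_size = 10M yields num = (16M - 10M) / 2M + 1 = 4, i.e. the
 * four candidate load addresses 100M, 102M, 104M and 106M (the last of
 * which still fits, ending at 116M).
 */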
/*
 * Skip as many 1GB huge pages as possible in the passed region
 * according to the number which users specified:
 */
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
        unsigned long addr, size = 0;
        struct mem_vector tmp;
        int i = 0;

        if (!max_gb_huge_pages) {
                store_slot_info(region, image_size);
                return;
        }

        addr = ALIGN(region->start, PUD_SIZE);
        /* Did we raise the address above the passed in memory entry? */
        if (addr < region->start + region->size)
                size = region->size - (addr - region->start);

        /* Check how many 1GB huge pages can be filtered out: */
        while (size > PUD_SIZE && max_gb_huge_pages) {
                size -= PUD_SIZE;
                max_gb_huge_pages--;
                i++;
        }

        /* No good 1GB huge pages found: */
        if (!i) {
                store_slot_info(region, image_size);
                return;
        }

        /*
         * Skip those 'i'*1GB good huge pages, and continue checking and
         * processing the remaining head or tail part of the passed region
         * if available.
         */
        if (addr >= region->start + image_size) {
                tmp.start = region->start;
                tmp.size = addr - region->start;
                store_slot_info(&tmp, image_size);
        }

        size = region->size - (addr - region->start) - i * PUD_SIZE;
        if (size >= image_size) {
                tmp.start = addr + i * PUD_SIZE;
                tmp.size = size;
                store_slot_info(&tmp, image_size);
        }
}
static unsigned long slots_fetch_random(void)
{
        unsigned long slot;
        int i;

        /* Handle case of no slots stored. */
        if (slot_max == 0)
                return 0;

        slot = kaslr_get_random_long("Physical") % slot_max;

        for (i = 0; i < slot_area_index; i++) {
                if (slot >= slot_areas[i].num) {
                        slot -= slot_areas[i].num;
                        continue;
                }
                return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
        }

        if (i == slot_area_index)
                debug_putstr("slots_fetch_random() failed!?\n");
        return 0;
}
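
/*
 * Worked example (illustrative, not from the original file): with two
 * slot areas {addr = 64M, num = 3} and {addr = 512M, num = 5}, slot_max
 * is 8. A random draw of slot = 4 skips the first area (4 >= 3, leaving
 * slot = 1) and lands in the second, returning
 * 512M + 1 * CONFIG_PHYSICAL_ALIGN.
 */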
static void process_mem_region(struct mem_vector *entry,
                               unsigned long minimum,
                               unsigned long image_size)
{
        struct mem_vector region, overlap;
        struct slot_area slot_area;
        unsigned long start_orig, end;
        struct mem_vector cur_entry;

        /* On 32-bit, ignore entries entirely above our maximum. */
        if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
                return;

        /* Ignore entries entirely below our minimum. */
        if (entry->start + entry->size < minimum)
                return;

        /* Ignore entries above memory limit */
        end = min(entry->size + entry->start, mem_limit);
        if (entry->start >= end)
                return;
        cur_entry.start = entry->start;
        cur_entry.size = end - entry->start;

        region.start = cur_entry.start;
        region.size = cur_entry.size;

        /* Give up if slot area array is full. */
        while (slot_area_index < MAX_SLOT_AREA) {
                start_orig = region.start;

                /* Potentially raise address to minimum location. */
                if (region.start < minimum)
                        region.start = minimum;

                /* Potentially raise address to meet alignment needs. */
                region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

                /* Did we raise the address above the passed in memory entry? */
                if (region.start > cur_entry.start + cur_entry.size)
                        return;

                /* Reduce size by any delta from the original address. */
                region.size -= region.start - start_orig;

                /* On 32-bit, reduce region size to fit within max size. */
                if (IS_ENABLED(CONFIG_X86_32) &&
                    region.start + region.size > KERNEL_IMAGE_SIZE)
                        region.size = KERNEL_IMAGE_SIZE - region.start;

                /* Return if region can't contain decompressed kernel */
                if (region.size < image_size)
                        return;

                /* If nothing overlaps, store the region and return. */
                if (!mem_avoid_overlap(&region, &overlap)) {
                        process_gb_huge_pages(&region, image_size);
                        return;
                }

                /* Store beginning of region if it holds at least image_size. */
                if (overlap.start > region.start + image_size) {
                        struct mem_vector beginning;

                        beginning.start = region.start;
                        beginning.size = overlap.start - region.start;
                        process_gb_huge_pages(&beginning, image_size);
                }

                /* Return if overlap extends to or past end of region. */
                if (overlap.start + overlap.size >= region.start + region.size)
                        return;

                /* Clip off the overlapping region and start over. */
                region.size -= overlap.start - region.start + overlap.size;
                region.start = overlap.start + overlap.size;
        }
}
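
/*
 * Worked example of the clipping loop (illustrative, not from the
 * original file): for region = [16M, 80M), image_size = 10M and a single
 * avoided range [32M, 40M), the first pass stores the head [16M, 32M)
 * (since 32M > 16M + 10M), then clips the region to [40M, 80M) and
 * iterates again; with nothing else overlapping, the second pass stores
 * the tail and returns.
 */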
#ifdef CONFIG_EFI
/*
 * Returns true if a mirror region was found (in which case it has
 * already been processed for slot additions).
 */
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
        struct efi_info *e = &boot_params->efi_info;
        bool efi_mirror_found = false;
        struct mem_vector region;
        efi_memory_desc_t *md;
        unsigned long pmap;
        char *signature;
        u32 nr_desc;
        int i;

        signature = (char *)&e->efi_loader_signature;
        if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
            strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
                return false;

#ifdef CONFIG_X86_32
        /* Can't handle data above 4GB at this time */
        if (e->efi_memmap_hi) {
                warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
                return false;
        }
        pmap = e->efi_memmap;
#else
        pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

        nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
        for (i = 0; i < nr_desc; i++) {
                md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        efi_mirror_found = true;
                        break;
                }
        }

        for (i = 0; i < nr_desc; i++) {
                md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);

                /*
                 * Here we are more conservative in picking free memory than
                 * the EFI spec allows:
                 *
                 * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are
                 * also free memory and thus available to place the kernel
                 * image into, but in practice there's firmware where using
                 * that memory leads to crashes.
                 *
                 * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
                 */
                if (md->type != EFI_CONVENTIONAL_MEMORY)
                        continue;

                if (efi_mirror_found &&
                    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
                        continue;

                region.start = md->phys_addr;
                region.size = md->num_pages << EFI_PAGE_SHIFT;
                process_mem_region(&region, minimum, image_size);
                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted EFI scan (slot_areas full)!\n");
                        break;
                }
        }

        return efi_mirror_found;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
        return false;
}
#endif
static void process_e820_entries(unsigned long minimum,
                                 unsigned long image_size)
{
        int i;
        struct mem_vector region;
        struct boot_e820_entry *entry;

        /* Verify potential e820 positions, appending to slots list. */
        for (i = 0; i < boot_params->e820_entries; i++) {
                entry = &boot_params->e820_table[i];
                /* Skip non-RAM entries. */
                if (entry->type != E820_TYPE_RAM)
                        continue;
                region.start = entry->addr;
                region.size = entry->size;
                process_mem_region(&region, minimum, image_size);
                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820 scan (slot_areas full)!\n");
                        break;
                }
        }
}
static unsigned long find_random_phys_addr(unsigned long minimum,
                                           unsigned long image_size)
{
        /* Check if we had too many memmaps. */
        if (memmap_too_large) {
                debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
                return 0;
        }

        /* Make sure minimum is aligned. */
        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

        if (process_efi_entries(minimum, image_size))
                return slots_fetch_random();

        process_e820_entries(minimum, image_size);
        return slots_fetch_random();
}
static unsigned long find_random_virt_addr(unsigned long minimum,
                                           unsigned long image_size)
{
        unsigned long slots, random_addr;

        /* Make sure minimum is aligned. */
        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
        /* Align image_size for easy slot calculations. */
        image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

        /*
         * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
         * within the range [minimum, KERNEL_IMAGE_SIZE)?
         */
        slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
                 CONFIG_PHYSICAL_ALIGN + 1;

        random_addr = kaslr_get_random_long("Virtual") % slots;

        return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
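
/*
 * Worked example of the slot arithmetic (illustrative, not from the
 * original file): with KERNEL_IMAGE_SIZE = 1G, minimum = 16M,
 * image_size = 32M and CONFIG_PHYSICAL_ALIGN = 2M,
 * slots = (1024M - 16M - 32M) / 2M + 1 = 489, so the virtual offset is
 * one of 489 evenly spaced addresses from 16M up to 992M.
 */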
/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
                            unsigned long input_size,
                            unsigned long *output,
                            unsigned long output_size,
                            unsigned long *virt_addr)
{
        unsigned long random_addr, min_addr;

        if (cmdline_find_option_bool("nokaslr")) {
                warn("KASLR disabled: 'nokaslr' on cmdline.");
                return;
        }

#ifdef CONFIG_X86_5LEVEL
        if (__read_cr4() & X86_CR4_LA57) {
                __pgtable_l5_enabled = 1;
                pgdir_shift = 48;
                ptrs_per_p4d = 512;
        }
#endif

        boot_params->hdr.loadflags |= KASLR_FLAG;

        /* Prepare to add new identity pagetables on demand. */
        initialize_identity_maps();

        /* Record the various known unsafe memory ranges. */
        mem_avoid_init(input, input_size, *output);

        /*
         * Low end of the randomization range should be the
         * smaller of 512M or the initial kernel image
         * location:
         */
        min_addr = min(*output, 512UL << 20);

        /* Walk available memory entries to find a random address. */
        random_addr = find_random_phys_addr(min_addr, output_size);
        if (!random_addr) {
                warn("Physical KASLR disabled: no suitable memory region!");
        } else {
                /* Update the new physical address location. */
                if (*output != random_addr) {
                        add_identity_map(random_addr, output_size);
                        *output = random_addr;
                }

                /*
                 * This loads the identity mapping page table.
                 * This should only be done if a new physical address
                 * is found for the kernel, otherwise we should keep
                 * the old page table so it behaves like the "nokaslr"
                 * case.
                 */
                finalize_identity_maps();
        }

        /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
        if (IS_ENABLED(CONFIG_X86_64))
                random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
        *virt_addr = random_addr;
}