// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
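
/*
 * Page flags for the early 2M direct-map entries created by
 * early_make_pgtable(). _PAGE_GLOBAL is left clear so that the CR3
 * reload in reset_early_page_tables() flushes them; _PAGE_NX is left
 * clear, presumably because NX support cannot be assumed this early.
 */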
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int pgtable_l5_enabled __ro_after_init;
EXPORT_SYMBOL(pgtable_l5_enabled);
unsigned int pgdir_shift __ro_after_init = 48;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 512;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE;
EXPORT_SYMBOL(vmemmap_base);
#endif

#define __head __section(.head.text)
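
/*
 * Convert a link-time virtual address into the corresponding address in
 * the physical copy of the kernel currently loaded at 'physaddr', so the
 * not-yet-relocated boot code can dereference kernel symbols.
 */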
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}
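
/*
 * Probe CPUID leaf 7 for LA57 and, when 5-level paging is available,
 * flip pgtable_l5_enabled. The write goes through fixup_int() because
 * the variable must be accessed at its physical address this early.
 */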
static void __head check_la57_support(unsigned long physaddr)
{
	if (native_cpuid_eax(0) < 7)
		return;

	if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
		return;

	*fixup_int(&pgtable_l5_enabled, physaddr) = 1;
}
#else
static void __head check_la57_support(unsigned long physaddr) {}
#endif
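
/*
 * Note: __startup_64() runs from the identity-mapped copy of the kernel
 * before relocation, so every global it touches must go through
 * fixup_pointer(); the compiler is not obliged to emit RIP-relative
 * accesses for globals here.
 */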
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	int i;
	unsigned int *next_pgt_ptr;

	check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable(bp);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;
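
	/*
	 * Entry 506 of level2_fixmap_pgt points at the fixmap's level1
	 * page table (per the layout in head_64.S), so it is the only
	 * entry here holding a physical address that needs the delta.
	 */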
	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;

	/*
	 * Set up the identity mapping for the switchover. These
	 * entries should *NOT* have the global bit set! This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
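
	/*
	 * Each level below installs two adjacent entries (i + 0 and
	 * i + 1) so the temporary mapping stays valid even when the
	 * kernel image straddles a boundary at that level.
	 */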
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
		p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
		p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
	pud[i + 0] = (pudval_t)pmd + pgtable_flags;
	pud[i + 1] = (pudval_t)pmd + pgtable_flags;
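
	/*
	 * Template for the early 2M mappings of the kernel image:
	 * executable, non-global and, when SME is active, encrypted.
	 */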
	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
		pmd[idx] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;
	}

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	p = fixup_pointer(&phys_base, physaddr);
	*p += load_delta - sme_get_me_mask();

	/* Encrypt the kernel and related data (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
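	/*
	 * Clearing PTRS_PER_PGD - 1 entries leaves the last slot (the
	 * kernel's high mapping at __START_KERNEL_map) intact.
	 */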
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}
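
/*
 * Invoked via the early page-fault handler when a __PAGE_OFFSET address
 * has no mapping yet: the missing levels are built on demand, with
 * tables handed out from early_dynamic_pgts and the whole pool recycled
 * by reset_early_page_tables() if it runs out.
 */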
/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return -1;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
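
/*
 * Map the 2M page covering a faulting __PAGE_OFFSET address, using
 * early_pmd_flags (which may have the SME mask folded in).
 */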
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}

/* Don't add a printk in there. printk relies on the PDA which is not
   initialized yet. */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
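
/*
 * The boot protocol splits the command line pointer across the 32-bit
 * hdr.cmd_line_ptr field and the high bits in ext_cmd_line_ptr; combine
 * them into a single 64-bit physical address.
 */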
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof boot_params);
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	idt_setup_early_handler();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* Set the init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* The version field is always non-zero if the boot data was copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();