// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
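/*
 * Flags for the early 2M mappings; _PAGE_GLOBAL and _PAGE_NX are left clear
 * since it is not yet safe to set them this early in boot.
 */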
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

#define __head  __section(.head.text)

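/*
 * Translate a link-time kernel virtual address into the physical address it
 * currently lives at, so globals can be accessed before relocation.
 */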
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
        /*
         * 5-level paging is detected and enabled at the kernel decompression
         * stage. Only check if it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;

        *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
        *fixup_int(&pgdir_shift, physaddr) = 48;
        *fixup_int(&ptrs_per_p4d, physaddr) = 512;
        *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
        *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
        *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

        return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
        return false;
}
#endif

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long vaddr, vaddr_end;
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        bool la57;
        int i;
        unsigned int *next_pgt_ptr;

        la57 = check_la57_support(physaddr);

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();

        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        p = pgd + pgd_index(__START_KERNEL_map);
        if (la57)
                *p = (unsigned long)level4_kernel_pgt;
        else
                *p = (unsigned long)level3_kernel_pgt;
        *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

        if (la57) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
                pmd[i] += load_delta;

        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

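        /* Propagate the SME encryption mask into the new page-table entries. */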
        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

        if (la57) {
                p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
                                    physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = physaddr >> P4D_SHIFT;
                p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
                p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = physaddr >> PUD_SHIFT;
        pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
        pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

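        /* Identity-map the kernel image (_text to _end) with 2M pages. */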
        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT);

                pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }

        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds when the kernel is relocated;
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
        }

        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Clear the memory encryption mask from the .bss..decrypted section.
         * The bss section will be memset to zero later in the initialization so
         * there is no need to zero it after changing the memory encryption
         * attribute.
         */
        if (mem_encrypt_active()) {
                vaddr = (unsigned long)__start_bss_decrypted;
                vaddr_end = (unsigned long)__end_bss_decrypted;
                for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
                        i = pmd_index(vaddr);
                        pmd[i] -= sme_get_me_mask();
                }
        }

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

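/*
 * Called from head_64.S when a secondary CPU starts; the page tables already
 * exist at that point, so only the CR3 modifier needs to be computed.
 */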
unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address, or early pgt is done? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return -1;

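        /*
         * Retry point: when the pool of early dynamic page tables runs out
         * below, it is reset and the walk starts over.
         */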
again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled())
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return 0;
}

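/* Map the 2M page containing @address, using the early PMD flags. */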
int __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

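/* Combine the 32-bit cmd_line_ptr with the upper bits from ext_cmd_line_ptr. */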
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

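/*
 * Copy the boot parameters and the kernel command line out of the real-mode
 * data before that memory is released for general use.
 */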
static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

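/*
 * C entry point for the boot CPU, reached from head_64.S once the early
 * page tables are in place.
 */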
asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* Set the init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* The version field is non-zero only once the boot data has been copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}