// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
void *dtb_early_va;

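/*
 * Report the highest page frame number of each zone to the core mm.
 * ZONE_DMA32 is capped at the 4 GiB boundary; everything else goes to
 * ZONE_NORMAL.
 */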
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
                        (unsigned long) PFN_PHYS(max_low_pfn)));
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

        free_area_init_nodes(max_zone_pfns);
}

static void setup_zero_page(void)
{
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

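/*
 * Hand all free memblock memory over to the buddy allocator and print
 * the kernel's memory banner.
 */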
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

        high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
        memblock_free_all();

        mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
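/*
 * Validate the initrd range handed over by the bootloader and reserve
 * it in memblock; on any inconsistency the initrd is disabled.
 */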
static void __init setup_initrd(void)
{
        unsigned long size;

        if (initrd_start >= initrd_end) {
                pr_info("initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                pr_err("initrd extends beyond end of memory");
                goto disable;
        }

        size = initrd_end - initrd_start;
        memblock_reserve(__pa(initrd_start), size);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
                (void *)(initrd_start), size);
        return;
disable:
        pr_cont(" - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

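/*
 * Physical address of the DTB, saved by setup_vm() so that
 * setup_bootmem() can reserve it below.
 */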
static phys_addr_t dtb_early_pa __initdata;

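/*
 * Pick the memory region containing the kernel, clamp it to what the
 * linear mapping can cover, and reserve everything that must survive
 * early boot: the kernel image, the initrd and the DTB.
 */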
void __init setup_bootmem(void)
{
        struct memblock_region *reg;
        phys_addr_t mem_size = 0;
        phys_addr_t vmlinux_end = __pa(&_end);
        phys_addr_t vmlinux_start = __pa(&_start);

        /* Find the memory region containing the kernel */
        for_each_memblock(memory, reg) {
                phys_addr_t end = reg->base + reg->size;

                if (reg->base <= vmlinux_end && vmlinux_end <= end) {
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

                        /*
                         * Remove the memory between the end of the
                         * usable area and the end of the region.
                         */
                        if (reg->base + mem_size < end)
                                memblock_remove(reg->base + mem_size,
                                                end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);

        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

        set_max_mapnr(PFN_DOWN(mem_size));
        max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());

#ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

        /*
         * Avoid using early_init_fdt_reserve_self() since __pa() does
         * not work for DTB pointers that are fixmap addresses
         */
        memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

        early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();

        for_each_memblock(memory, reg) {
                unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
                unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, 0);
        }
}

#ifdef CONFIG_MMU
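/*
 * va_pa_offset is the distance between the linear mapping and the
 * physical load address; __va()/__pa() translate by adding or
 * subtracting it.  pfn_base is the PFN of the kernel load address and
 * is used as the base for the pfn/page conversion macros.
 */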
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

#define MAX_EARLY_MAPPING_SIZE  SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

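/*
 * Install a PTE for the given fixmap slot, or clear it (and flush the
 * stale TLB entry) when prot is zero.
 */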
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = &fixmap_pte[pte_index(addr)];

        if (pgprot_val(prot)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
        } else {
                pte_clear(&init_mm, addr, ptep);
                local_flush_tlb_page(addr);
        }
}

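/*
 * Before the MMU is on we run with VA == PA, so a table's physical
 * address can be dereferenced directly; afterwards the table is
 * reached through the FIX_PTE fixmap slot.
 */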
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
        if (mmu_enabled) {
                clear_fixmap(FIX_PTE);
                return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
        } else {
                return (pte_t *)((uintptr_t)pa);
        }
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
        /*
         * We only create PMD or PGD early mappings so we
         * should never reach here with MMU disabled.
         */
        BUG_ON(!mmu_enabled);

        return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

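/* Fill in one PTE, leaving any existing mapping in place. */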
static void __init create_pte_mapping(pte_t *ptep,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        uintptr_t pte_index = pte_index(va);

        BUG_ON(sz != PAGE_SIZE);

        if (pte_none(ptep[pte_index]))
                ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

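/*
 * Early PMD pages are allocated statically: one page per PGD entry
 * that the early kernel mapping (at most MAX_EARLY_MAPPING_SIZE,
 * starting at the PGDIR-aligned PAGE_OFFSET) can touch.
 */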
#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS          1UL
#else
#define NUM_EARLY_PMDS          (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
        if (mmu_enabled) {
                clear_fixmap(FIX_PMD);
                return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
        } else {
                return (pmd_t *)((uintptr_t)pa);
        }
}

static phys_addr_t __init alloc_pmd(uintptr_t va)
{
        uintptr_t pmd_num;

        if (mmu_enabled)
                return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

        pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
        BUG_ON(pmd_num >= NUM_EARLY_PMDS);
        return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

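/*
 * Install a mapping at the PMD level: a leaf entry for PMD_SIZE
 * requests, otherwise allocate or look up the PTE table and descend
 * into it.
 */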
static void __init create_pmd_mapping(pmd_t *pmdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pte_t *ptep;
        phys_addr_t pte_phys;
        uintptr_t pmd_index = pmd_index(va);

        if (sz == PMD_SIZE) {
                if (pmd_none(pmdp[pmd_index]))
                        pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
                return;
        }

        if (pmd_none(pmdp[pmd_index])) {
                pte_phys = alloc_pte(va);
                pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
                ptep = get_pte_virt(pte_phys);
                memset(ptep, 0, PAGE_SIZE);
        } else {
                pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
                ptep = get_pte_virt(pte_phys);
        }

        create_pte_mapping(ptep, va, pa, sz, prot);
}

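/*
 * With a folded PMD (two-level paging), PGD entries point straight at
 * PTE tables, so create_pgd_mapping() descends directly to the PTE
 * level; otherwise it goes through the PMD helpers above.
 */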
#define pgd_next_t              pmd_t
#define alloc_pgd_next(__va)    alloc_pmd(__va)
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next         fixmap_pmd
#else
#define pgd_next_t              pte_t
#define alloc_pgd_next(__va)    alloc_pte(__va)
#define get_pgd_next_virt(__pa) get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next         fixmap_pte
#endif

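/*
 * Top of the early page-table walkers: install a leaf PGD entry for
 * PGDIR_SIZE requests, otherwise allocate or look up the next-level
 * table and recurse into it.
 */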
static void __init create_pgd_mapping(pgd_t *pgdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pgd_next_t *nextp;
        phys_addr_t next_phys;
        uintptr_t pgd_index = pgd_index(va);

        if (sz == PGDIR_SIZE) {
                if (pgd_val(pgdp[pgd_index]) == 0)
                        pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
                return;
        }

        if (pgd_val(pgdp[pgd_index]) == 0) {
                next_phys = alloc_pgd_next(va);
                pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
                nextp = get_pgd_next_virt(next_phys);
                memset(nextp, 0, PAGE_SIZE);
        } else {
                next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
                nextp = get_pgd_next_virt(next_phys);
        }

        create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

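/*
 * A PMD mapping is only possible when both the physical base and the
 * size are PMD-aligned; anything else falls back to base pages.
 */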
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
        /* Upgrade to PMD_SIZE mappings whenever possible */
        if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
                return PAGE_SIZE;

        return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing to access kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

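/*
 * Build the trampoline and early page tables that let head.S turn the
 * MMU on and reach paging_init().
 */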
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
        uintptr_t va, end_va;
        uintptr_t load_pa = (uintptr_t)(&_start);
        uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
        uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

        va_pa_offset = PAGE_OFFSET - load_pa;
        pfn_base = PFN_DOWN(load_pa);

        /*
         * Enforce boot alignment requirements of RV32 and
         * RV64 by only allowing PMD or PGD mappings.
         */
        BUG_ON(map_size == PAGE_SIZE);

        /* Sanity check alignment and size */
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
        BUG_ON((load_pa % map_size) != 0);
        BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

        /* Setup early PGD for fixmap */
        create_pgd_mapping(early_pg_dir, FIXADDR_START,
                           (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
        /* Setup fixmap PMD */
        create_pmd_mapping(fixmap_pmd, FIXADDR_START,
                           (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
        /* Setup trampoline PGD and PMD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
        create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
                           load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
        /* Setup trampoline PGD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

        /*
         * Set up the early PGD covering the entire kernel, which allows
         * us to reach paging_init().  All memory banks are mapped later,
         * in setup_vm_final() below.
         */
        end_va = PAGE_OFFSET + load_sz;
        for (va = PAGE_OFFSET; va < end_va; va += map_size)
                create_pgd_mapping(early_pg_dir, va,
                                   load_pa + (va - PAGE_OFFSET),
                                   map_size, PAGE_KERNEL_EXEC);

        /* Create fixed mapping for early FDT parsing */
        end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
        for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
                create_pte_mapping(fixmap_pte, va,
                                   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
                                   PAGE_SIZE, PAGE_KERNEL);

        /* Save pointer to DTB for early FDT parsing */
        dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
        /* Save physical address for memblock reservation */
        dtb_early_pa = dtb_pa;
}

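/*
 * Called from paging_init() with the MMU on: populate swapper_pg_dir
 * with the fixmap and a linear mapping of every memory bank, then
 * switch satp over to it.
 */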
static void __init setup_vm_final(void)
{
        uintptr_t va, map_size;
        phys_addr_t pa, start, end;
        struct memblock_region *reg;

        /* Set mmu_enabled flag */
        mmu_enabled = true;

        /* Setup swapper PGD for fixmap */
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
                           __pa(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);

        /* Map all memory banks */
        for_each_memblock(memory, reg) {
                start = reg->base;
                end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;
                if (start <= __pa(PAGE_OFFSET) &&
                    __pa(PAGE_OFFSET) < end)
                        start = __pa(PAGE_OFFSET);

                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);
                        create_pgd_mapping(swapper_pg_dir, va, pa,
                                           map_size, PAGE_KERNEL_EXEC);
                }
        }

        /* Clear fixmap PTE and PMD mappings */
        clear_fixmap(FIX_PTE);
        clear_fixmap(FIX_PMD);

        /* Move to swapper page table */
        csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
        dtb_early_va = (void *)dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

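/*
 * Finish memory management initialisation: switch to the final page
 * tables, register memory with sparsemem, and size the zones.
 */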
void __init paging_init(void)
{
        setup_vm_final();
        memblocks_present();
        sparse_init();
        setup_zero_page();
        zone_sizes_init();
}

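/* The vmemmap is always mapped with base pages here. */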
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node);
}
#endif