// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE	(FIX_IMMR_SIZE << PAGE_SHIFT)
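
/*
 * The IMMR is the 8xx's internal memory-mapped register block.  It is
 * mapped at VIRT_IMMR_BASE through the fixmap, and IMMR_SIZE is simply
 * the byte size of the FIX_IMMR_SIZE fixmap slots reserved for it.
 */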

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap.
 * Return 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}
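
/*
 * These helpers let callers such as ioremap detect addresses already
 * covered by a block mapping or by the IMMR fixmap and translate them
 * directly, without walking the page tables.  When booting with
 * __map_without_ltlbs, only the IMMR range qualifies.
 */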

#define LARGE_PAGE_SIZE_8M	(1 << 23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* Pin up to the first three 8M pages after IMMR in the DTLB */
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
		unsigned long addr = 0;
		unsigned long mem = total_lowmem;

		for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
			mtspr(SPRN_MD_CTR, ctr | (i << 8));
			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
			mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
			mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
			addr += LARGE_PAGE_SIZE_8M;
			mem -= LARGE_PAGE_SIZE_8M;
		}
	}
}
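
/*
 * For illustration: when CONFIG_PIN_TLB_IMMR is set, one DTLB entry is
 * already reserved for the pinned IMMR mapping, so the loop above pins
 * entries 29..31, i.e. at most 3 x 8M = 24M of lowmem; otherwise
 * entries 28..31 allow up to 32M.  MD_CTR selects the entry to load,
 * MD_EPN takes the effective page number, MD_TWC the level-2
 * attributes, and the write to MD_RPN loads the entry.
 */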

static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
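
/*
 * PAGE_KERNEL_NCG maps the IMMR pages non-cached and guarded, as
 * required for memory-mapped I/O registers; populating real PTEs also
 * keeps the area usable when no large TLB entry covers it.
 */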

static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
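
/*
 * Example, assuming the default PAGE_OFFSET of 0xc0000000: with
 * mapped = 24M = 0x01800000, __va(mapped) is 0xc1800000 and the 16-bit
 * immediate at the patch site becomes 0xc180.  The TLB miss handlers
 * compare the upper half of the faulting address against this limit to
 * decide whether it lies in the block-mapped linear range.
 */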

static void mmu_patch_addis(s32 *site, long simm)
{
	unsigned int instr = *(unsigned int *)patch_site_addr(site);

	instr &= 0xffff0000;
	instr |= ((unsigned long)simm) >> 16;
	patch_instruction_site(site, instr);
}
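
/*
 * Example: addis shifts its immediate left by 16 at execution, and
 * mmu_patch_addis() stores the upper half of simm in the low 16 bits
 * of the instruction.  For simm = -0x01800000 the stored immediate is
 * 0xfe80, so the patched addis effectively adds -24M.
 */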

static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
{
	unsigned long s = offset;
	unsigned long v = PAGE_OFFSET + s;
	phys_addr_t p = memstart_addr + s;

	for (; s < top; s += PAGE_SIZE) {
		map_kernel_page(v, p, prot);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
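
/*
 * offset and top are offsets from the base of lowmem; v and p advance
 * in lockstep so the linear VA->PA relationship holds for every page
 * of the chunk.
 */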

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
			patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
	} else {
		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);

		/*
		 * Populate page tables to:
		 * - have them appear in /sys/kernel/debug/kernel_page_tables
		 * - allow the BDI to find the pages when they are not PINNED
		 */
		mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
		mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
	}
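
	/*
	 * Example: for top = 26M = 0x01a00000, mapped is rounded down to
	 * 0x01800000 (three 8M pages).  The 2M tail is not block mapped;
	 * the caller is expected to cover it with normal-sized pages
	 * based on the value returned below.
	 */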

	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 8 MiB pages. Consequently,
	 * restrict the top end of RAM currently allocatable so that calls
	 * to the MEMBLOCK to allocate PTEs for "tail" coverage with
	 * normal-sized pages (or other reasons) do not attempt to allocate
	 * outside the allowed range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}
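
/*
 * mmu_mark_initmem_nx() runs when init memory is freed: from then on
 * only the kernel text proper should remain executable, so the ITLB
 * limits are patched down and the linear mapping's page tables are
 * given their final protections.
 */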

void mmu_mark_initmem_nx(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
		unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
		unsigned long etext = __pa(_etext);

		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));

		/* Update page tables for PTDUMP and BDI */
		mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
		if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
			mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
			mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
		} else {
			mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
		}
	}
}
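
/*
 * The __pgprot(0) pass above clears the 0..einittext8 range first, so
 * that the following calls apply a clean set of permissions whichever
 * branch is taken.
 */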

#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);
	unsigned long etext = __pa(_etext);

	if (CONFIG_DATA_SHIFT < 23)
		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
				-__pa(((unsigned long)_sinittext) &
				      ~(LARGE_PAGE_SIZE_8M - 1)));
	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));

	/* Update page tables for PTDUMP and BDI */
	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
	mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
}
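
/*
 * The 32M (0x02000000) cap presumably matches the most that the early
 * 8M block mappings can cover (four DTLB entries); it keeps early
 * memblock allocations within RAM that is addressable at this stage
 * of boot.
 */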

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);
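
	/*
	 * For illustration: offset is the sign-extended low 16 bits of
	 * __pa(swapper_pg_dir), and the TLB miss handlers add that same
	 * constant back as a displacement when indexing the level 1
	 * table, so the subtraction here cancels out for any PGDIR.
	 */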

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);

	/* sync */
	mb();
}

void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif
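
/*
 * On the 8xx, execute permission is governed by the instruction-side
 * access protection groups in MI_AP: the MI_APG_KUEP encoding denies
 * supervisor-mode execution from user pages, which is what implements
 * KUEP on this platform.
 */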

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif
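
/*
 * MD_AP is the data-side counterpart: MD_APG_KUAP makes user pages
 * inaccessible from kernel mode by default, and the uaccess helpers
 * flip MD_AP around explicit user accesses.  The access-protection
 * group encoding is fixed at build time, which is presumably why KUAP
 * cannot be disabled at runtime here.
 */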