/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"
struct hash_pte *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */
struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];
/* Return PA for this VA if it is mapped by a BAT, or 0 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/* Return VA for a given PA or 0 if not mapped */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			     + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
static int find_free_bat(void)
{
	int b;

	if (cpu_has_feature(CPU_FTR_601)) {
		for (b = 0; b < 4; b++) {
			struct ppc_bat *bat = BATS[b];
			if (!(bat[0].batl & 0x40))
				return b;
		}
	} else {
		int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

		for (b = 0; b < n; b++) {
			struct ppc_bat *bat = BATS[b];
			if (!(bat[1].batu & 3))
				return b;
		}
	}
	return -1;
}
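/*
 * Annotation (not from the original file): on the 601 the BAT valid bit
 * lives in the lower register, which is why the 0x40 test above matches
 * the "V=1" value written by setbat() below.  On 603 and later, the low
 * two bits of the upper DBAT word are Vs and Vp, so "batu & 3" is clear
 * only when the pair is completely unused; only the DBAT is tested
 * because setbat() may leave the IBAT zeroed for non-executable mappings.
 */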
static unsigned int block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20;
	unsigned int base_shift = (fls(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
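/*
 * Annotation (not from the original file): worked example on a non-601,
 * where max_size is 256M.  For base = 0x10000000 and top = 0x30000000,
 * fls(base) - 1 = 28 and fls(top - base) - 1 = 29, so the candidates are
 * 256M and 512M and min3() returns 256M.  The result is always a power of
 * two; mmu_mapin_ram() below stops once it falls under the 128k minimum
 * BAT size.
 */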
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	if (__map_without_bats) {
		printk(KERN_DEBUG "RAM mapped without BATs\n");
		return base;
	}
	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = block_size(base, top);
		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}
	return base;
}
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, etc. */
		/* Do the DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;	/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			flags &= ~_PAGE_EXEC;
		}
		if (flags & _PAGE_EXEC)
			bat[0] = bat[1];
		else
			bat[0].batu = bat[0].batl = 0;
	} else {
		/* 601 cpu */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
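/*
 * Annotation (not from the original file): as an illustration, on a 603
 * or later a call like setbat(0, 0xc0000000, 0, 0x10000000, PAGE_KERNEL_X)
 * gives bl = 0x7ff, so bat[1].batu becomes 0xc0001ffe (BEPI = 0xc0000000,
 * BL covering 256M, Vs = 1) and bat[1].batl holds the physical base ORed
 * with the WIMG and protection bits; because _PAGE_EXEC is set, the IBAT
 * (bat[0]) is copied from the DBAT.
 */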
/* Preload a translation in the hash table */
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  bool is_exec, unsigned long trap)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
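/*
 * Annotation (not from the original file): hash_preload() walks the Linux
 * page tables down to the PMD entry covering 'ea' and passes the address
 * of the PTE page to add_hash_page() (in hashtable.S), which builds the
 * corresponding hash PTE so the next access to 'ea' does not need to
 * take a hash fault.
 */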
/* Initialize the hash table and patch the instructions in hashtable.S. */
void __init MMU_init_hw(void)
{
	unsigned int hmask, mb, mb2;
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
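	/*
	 * Annotation (not from the original file): worked example -- with
	 * 256MB of memory and 4k pages there are 65536 pages, so
	 * n_hpteg = 65536 / 8 = 8192 (8 HPTEs per 64-byte HPTEG).  That is
	 * already a power of two, so lg_n_hpteg = 13 and
	 * Hash_size = 8192 * 64 = 512kB.
	 */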
	/* Find some memory for the hash table. */
	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	Hash = __va(memblock_phys_alloc(Hash_size, Hash_size));
	memset(Hash, 0, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
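	/*
	 * Annotation (not from the original file): SDR1 combines HTABORG
	 * (the upper 16 bits of the hash table's physical address) with
	 * HTABMASK in its low bits.  SDR1_LOW_BITS computes that mask: for
	 * the 512kB example above, (8192 - 1) >> 10 = 7, i.e. the table
	 * spans eight 64kB units.  The size-aligned memblock allocation
	 * guarantees the low 16 bits of __pa(Hash) are zero, so the two
	 * fields can simply be ORed together.
	 */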
	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);

	printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
	       (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);

	/* Patch up the instructions in hashtable.S:create_hpte */
	if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
	Hash_mask = n_hpteg - 1;
	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		mb2 = 16 - LG_HPTEG_SIZE;

	modify_instruction_site(&patch__hash_page_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
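	/*
	 * Annotation (not from the original file): for the 512kB example,
	 * hmask = 8191 >> 10 = 7 and mb = mb2 = 32 - 6 - 13 = 13.  The
	 * 0xffff masks patch 16-bit immediate fields (the upper half of
	 * the hash table address and the hash mask), while the 0x7c0
	 * masks with the << 6 shifts target the 5-bit "mask begin" field
	 * of rlwinm/rlwimi instructions in hashtable.S.
	 */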
	/* Patch up the instructions in hashtable.S:flush_hash_page */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}
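/*
 * Annotation (not from the original file): these caps keep early memblock
 * allocations within the region covered by the BAT mapping set up at boot,
 * i.e. 16MB on the 601 and a single 256MB BAT on the other processors
 * handled by this file, so that memory allocated before the hash MMU is
 * fully initialised is actually addressable.
 */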