/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

#include <trace/events/thp.h>
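/*
 * Write the process table base and size encoding into the second doubleword
 * (PATB1) of partition table entry 0. PATB_GR marks process-scoped
 * translation as radix.
 */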
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}
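/*
 * Boot-time page table allocator: grab a naturally aligned, zeroed block
 * straight from memblock, for use before the slab allocator is up.
 */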
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		/*
		 * Before slab is available, carve intermediate tables
		 * out of memblock via early_alloc_pgtable().
		 */
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}
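/*
 * Build the kernel linear mapping. Each memblock region is mapped with the
 * largest page size available (1G, then 2M, then base pages); any remainder
 * that is not aligned to the current size is retried at the next smaller
 * size via the redo loop.
 */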
static void __init radix_init_pgtable(void)
{
	int loop_count;
	u64 base, end, start_addr;
	unsigned long rts_field;
	struct memblock_region *reg;
	unsigned long linear_page_size;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	loop_count = 0;
	for_each_memblock(memory, reg) {

		start_addr = reg->base;

redo:
		if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
			linear_page_size = PUD_SIZE;
		else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
			linear_page_size = PMD_SIZE;
		else
			linear_page_size = PAGE_SIZE;

		base = _ALIGN_UP(start_addr, linear_page_size);
		end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

		pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
			(unsigned long)base, (unsigned long)end,
			linear_page_size);

		while (base < end) {
			radix__map_kernel_page((unsigned long)__va(base),
					       base, PAGE_KERNEL_X,
					       linear_page_size);
			base += linear_page_size;
		}
		/*
		 * map the rest using lower page size
		 */
		if (end < reg->base + reg->size) {
			start_addr = end;
			loop_count++;
			goto redo;
		}
	}
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}
void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}
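/*
 * Translate a page size shift reported by firmware into the corresponding
 * mmu_psize_defs[] index, or -1 if it is not a size Linux supports.
 */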
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}
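/*
 * Each 32-bit cell of "ibm,processor-radix-AP-encodings" carries an actual
 * page size (AP) encoding in its top three bits and the matching page size
 * shift in the remaining bits.
 */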
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
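/*
 * Discover the radix page sizes supported by the hardware, falling back to
 * assuming 4K and 64K support when the device tree does not provide the
 * AP encodings.
 */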
void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}
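/*
 * POWER9 DD1 workaround: before flipping the HID0 radix mode bit, flush
 * every TLB entry (both process- and partition-scoped, via tlbie with
 * IS=3/RIC=2) so nothing cached under the old translation mode survives
 * the switch.
 */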
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	/*
	 * now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}
static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up the IAMR (Instruction Authority
	 * Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}
static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}
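/*
 * Boot CPU MMU bring-up: pick the Linux page sizes, publish the radix
 * page-table geometry through the generic book3s64 variables, and on bare
 * metal switch LPCR to radix host mode and install the partition table
 * before building the kernel page tables.
 */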
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		radix__flush_tlb_all();
	}
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access, but keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
}
#endif
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}
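/*
 * Clear a regular (non-huge) pmd during khugepaged collapse. The TLB flush
 * and the CPU kick (whose necessity the FIXME below questions) are meant to
 * stop other CPUs from using the page table that hung below this pmd.
 */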
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/*FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	/* Zero out the list_head overlay before reusing the pte page */
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}
int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */