/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>
unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}
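
/*
 * Boot-time page table allocator: before the slab allocator is up,
 * page tables are carved directly out of memblock, naturally aligned
 * to their own size.
 */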
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}
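
/*
 * Install a kernel mapping for effective address 'ea' -> physical
 * address 'pa' at the requested page size (PAGE_SIZE, PMD_SIZE or
 * PUD_SIZE), allocating intermediate table levels as needed.
 */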
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}
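
/*
 * Example (illustrative only): mapping the first 2M of RAM at its
 * linear-map address with a single PMD-level leaf would look like
 *
 *	radix__map_kernel_page((unsigned long)__va(0), 0,
 *			       PAGE_KERNEL, PMD_SIZE);
 *
 * which is essentially what create_physical_mapping() below does for
 * each suitably aligned chunk of memory.
 */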
#ifdef CONFIG_STRICT_KERNEL_RWX
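/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every
 * kernel mapping in [start, end), then flush the kernel TLB range.
 */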
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}
void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}
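
/*
 * Map a physical range with the largest page size that alignment and
 * the remaining gap allow (1G, then 2M, then base pages). With
 * STRICT_KERNEL_RWX, mappings that would span the kernel text are
 * split down so the text can later be made read-only on its own.
 */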
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}
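
/*
 * Build the kernel's radix page tables: the linear mapping for all of
 * memory, the PID space layout, and the process and partition tables.
 */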
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}
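
/*
 * Point partition table entry 0 at the kernel's radix tree: root PGD,
 * tree size, and the host-radix (PATB_HR) bit.
 */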
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}
void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}
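
/*
 * Translate a page size shift from the device tree into the
 * corresponding MMU_PAGE_* index, or -1 if it is not supported.
 */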
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}
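
/*
 * POWER9 DD1 workaround: the MMU mode is selected via HID0. Flush the
 * TLB for both partition- and process-scoped entries (ric = 2,
 * prs = 0/1) before setting HID0_POWER9_RADIX, then spin until the new
 * mode takes effect.
 */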
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);
	/*
	 * now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}
static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}
static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch from a kernel address.
	 */
	mtspr(SPRN_IAMR, iamr);
}
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access, but keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}
#ifdef CONFIG_MEMORY_HOTPLUG
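/*
 * Teardown helpers for memory unplug: free_pte_table()/free_pmd_table()
 * release a page table page once every entry in it has been cleared,
 * and clear the upper-level entry that pointed to it.
 */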
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}
static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}
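
/*
 * Top-level teardown: walk the kernel page tables for [start, end),
 * clear huge leaf entries in place, recurse into lower levels to clear
 * and free them, then flush the kernel TLB range once at the end.
 */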
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}
int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
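
/*
 * Transparent hugepage helpers. Radix keeps PMD-level hugepage entries
 * in the same format as ordinary PTEs, so updates reduce to
 * radix__pte_update() applied directly to the PMD entry.
 */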
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return pmd;
}
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
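
/*
 * Withdraw in FIFO order and scrub the two pte_t slots that were
 * overlaid by the list_head while the table sat on the deposit list.
 */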
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from a pmd pointing to a page table
	 * to a pmd pointing to a huge page (and back) while interrupts are
	 * disabled. We clear the pmd to possibly replace it with a page table
	 * pointer in different code paths. So make sure we wait for the
	 * parallel find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();

	return old_pmd;
}
int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */