#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

/*
 * For P9 DD1 only, we need to track whether the pte is huge.
 */
#define R_PAGE_LARGE	_RPAGE_RSV1

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif

/* An empty PTE can still have a R or C writeback */
#define RADIX_PTE_NONE_MASK		(_PAGE_DIRTY | _PAGE_ACCESSED)
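/*
 * (A cleared PTE may still receive a delayed reference/change update,
 * e.g. from the nest MMU, so radix__pte_none() below masks these two
 * bits out before testing for emptiness.)
 */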

/* Bits to set in a RPMD/RPUD/RPGD */
#define RADIX_PMD_VAL_BITS		(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS		(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS		(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
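/*
 * 0x8000000000000000 is the valid bit of a radix page-directory entry,
 * and the low-order bits encode the size of the next level of the tree,
 * which is why each level ORs in the index size of the level below it.
 */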

/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS		0x60000000000000e0UL
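/*
 * 0x60000000000000e0 covers bits 62:61 and 7:5; a sane non-leaf entry
 * has zeroes there, so any of those bits being set marks the entry bad.
 */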

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
			      RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
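/*
 * Worked example with the 4K page size values from radix-4k.h:
 * 9 + 9 + 9 + 13 index bits plus PAGE_SHIFT = 12 gives 52, i.e. the
 * page tables map a 2^52-byte EA range. The 64K layout reaches the
 * same 52 bits with 5 + 9 + 9 + 13 + 16.
 */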

/*
 * We support a 52 bit address space. Use the top bit for the kernel
 * virtual mapping, and make sure the kernel fits in the top
 * quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |     2 quadrant   |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |    1 quadrant    |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |    0 quadrant    |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map end (0xc010000000000000)
 * |                              |
 * |                              |
 * |      1/2 of virtual map      |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
#define RADIX_KERN_VIRT_SIZE	ASM_CONST(0x0008000000000000)
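/*
 * RADIX_KERN_VIRT_SIZE is 2^51 bytes: the kernel virtual region takes
 * the top half of the 52-bit 0xc quadrant, running from
 * 0xc008000000000000 up to 0xc010000000000000.
 */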

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on radix config.
 * (we keep a quarter for the virtual memmap)
 */
#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
/*
 * Defines the address of the vmemmap area, in its own region,
 * immediately after the vmalloc space.
 */
#define RADIX_VMEMMAP_BASE		(RADIX_VMALLOC_END)
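/*
 * With the constants above (illustrative arithmetic):
 *   vmalloc: 0xc008000000000000 - 0xc00a000000000000 (2^49 bytes)
 *   vmemmap: starts at 0xc00a000000000000
 */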

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif
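
/*
 * Atomically update a PTE: retry a read/modify/cmpxchg loop until the
 * exchange of (old | set) & ~clr succeeds, and return the old value so
 * callers can examine the previous R/C bits.
 */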
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	pte_t pte;
	unsigned long old_pte, new_pte;

	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = (old_pte | set) & ~clr;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	return old_pte;
}
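
/*
 * POWER9 DD1 cannot go straight from one valid PTE to another: the
 * workaround is to clear the PTE, flush the TLB, and only then install
 * the new value, so only later revisions take the single-cmpxchg path.
 */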
static inline unsigned long radix__pte_update(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, unsigned long clr,
					unsigned long set,
					int huge)
{
	unsigned long old_pte;

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long new_pte;

		old_pte = __radix_pte_update(ptep, ~0ul, 0);
		/*
		 * New value of the pte.
		 */
		new_pte = (old_pte | set) & ~clr;
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
		if (new_pte)
			__radix_pte_update(ptep, 0, new_pte);
	} else
		old_pte = __radix_pte_update(ptep, clr, set);
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

	if (full) {
		/*
		 * If we are just clearing the pte, we can skip the DD1
		 * pte update sequence and batch the tlb flush. The flush
		 * batching is done by the mmu gather code. We still keep
		 * the cmpxchg update to make sure we get the correct R/C
		 * bits, which might be updated via the Nest MMU.
		 */
		old_pte = __radix_pte_update(ptep, ~0ul, 0);
	} else
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);

	return __pte(old_pte);
}

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to invalidate the tlb.
 */
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
						pte_t *ptep, pte_t entry,
						unsigned long address)
{
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, ~0, 0);
		/*
		 * New value of the pte.
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
		__radix_pte_update(ptep, 0, new_pte);
	} else
		__radix_pte_update(ptep, 0, set);
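	/*
	 * ptesync orders the PTE store against subsequent hardware
	 * page-table walks, making the update visible to the MMU.
	 */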
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pgd_bad(pgd_t pgd)
{
	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
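
/*
 * On DD1 the PTE-clear TLB workaround needs to know the mapping size,
 * so huge PMDs are additionally tagged with the software bit
 * R_PAGE_LARGE (_RPAGE_RSV1) on that revision.
 */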
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
						  unsigned long address, pmd_t *pmdp)
{
	/* Nothing to do for radix. */
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pmd_t *pmdp, unsigned long clr,
						unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					      pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp);
extern int radix__has_transparent_hugepage(void);
#endif

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence:
	 *  DD1    52-28 = 24, 0b11000
	 *  Others 52-31 = 21, 0b10101
	 * RTS encoding details
	 * bits 0 - 2 of rts -> bits 5 - 7 of the unsigned long
	 * bits 3 - 4 of rts -> bits 61 - 62 of the unsigned long
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		rts_field = (0x3UL << 61);
	else {
		rts_field = (0x5UL << 5); /* bits 5 - 7 */
		rts_field |= (0x2UL << 61);
	}
	return rts_field;
}
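
/*
 * RTS is the "radix tree size" field of the partition/process table
 * entry: the hardware decodes the virtual address space size as
 * 2^(RTS + 31), so RTS = 21 yields the 2^52 range used here.
 */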

#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __ASSEMBLY__ */
#endif