/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  10
#define H_PUD_INDEX_SIZE  10
#define H_PGD_INDEX_SIZE  8

/*
 * Each context is 512TB (2^49 bytes) in size. An SLB miss for the first
 * context/default context is handled in the hot path.
 */
#define MAX_EA_BITS_PER_CONTEXT		49

/*
 * 64K aligned addresses free up a few of the lower bits of the RPN for us.
 * We steal those here. For more details look at pte_pfn/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0	/* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1	/* PFN is for a single 4k page */
#define H_PAGE_BUSY	_RPAGE_RPN44	/* software: PTE & hash are busy */
#define H_PAGE_HASHPTE	_RPAGE_RPN43	/* PTE has associated HPTE */

/* memory key bits. */
#define H_PTE_PKEY_BIT0	_RPAGE_RSV1
#define H_PTE_PKEY_BIT1	_RPAGE_RSV2
#define H_PTE_PKEY_BIT2	_RPAGE_RSV3
#define H_PTE_PKEY_BIT3	_RPAGE_RSV4
#define H_PTE_PKEY_BIT4	_RPAGE_RSV5

/*
 * We need to differentiate between an explicit huge page and a THP huge
 * page, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)

/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash slot indexes: 8 bytes for each PTE entry and another
 * 8 bytes for storing its slot details.
 */
#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3 + 1)
#define H_PTE_FRAG_NR		(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
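
/*
 * Worked numbers for the values above: H_PTE_INDEX_SIZE = 8 gives
 * 2^8 = 256 PTEs of 8 bytes each (2K), plus 2K of slot indexes, so
 * H_PTE_FRAG_SIZE_SHIFT = 8 + 3 + 1 = 12 (a 4K fragment) and, with a
 * 64K PAGE_SIZE, H_PTE_FRAG_NR = 64K >> 12 = 16 fragments per page.
 */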

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;

	/*
	 * Ensure that we do not read the hidx before we read the PTE. The
	 * writer side is expected to finish writing the hidx first and then
	 * the PTE, using smp_wmb(); pte_set_hash_slot() ensures that.
	 */
	smp_rmb();

	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
}

/*
 * Shift the hidx representation by one-modulo-0xf; i.e. hidx 0 is represented
 * as 1, 1 as 2, ..., and 0xf as 0. This convention lets us represent an
 * invalid hidx of 0xf with the bit value 0x0. PTEs are zeroed when allocated
 * anyway, so we don't have to zero them again; this saves on initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL)   /* shift forward by one */
#define HIDX_BITS(x, index) (x << (index << 2))
#define BITS_TO_HIDX(x, index) ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX 0x0UL
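
/*
 * Worked example of the convention above: storing hidx 0x3 for subpage
 * index 2 writes HIDX_SHIFT_BY_ONE(0x3) = 0x4 into bits 8-11 of the hidx
 * word via HIDX_BITS(0x4, 2); reading back, BITS_TO_HIDX() extracts 0x4
 * and HIDX_UNSHIFT_BY_ONE() recovers 0x3. A never-written nibble reads as
 * INVALID_RPTE_HIDX (0x0), which unshifts to the invalid hidx 0xf.
 */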

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}

/*
 * Commit the hidx and return the PTE bits that need to be modified. The
 * caller is expected to modify the PTE bits accordingly and commit the
 * PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	unsigned long *hidxp = (unsigned long *)(ptep + offset);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be expected to be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}
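
/*
 * The barrier pairing, sketched: pte_set_hidx() stores the hidx word and
 * issues smp_wmb() before its caller publishes the updated PTE, while
 * __real_pte() reads the PTE, issues smp_rmb(), and only then reads the
 * hidx word. A reader that observes the new PTE therefore also observes
 * the hidx written before it.
 */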

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);

/*
 * Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well, since we want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
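
/*
 * Usage sketch, abridged from the pattern in callers such as
 * flush_hash_page():
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		if (hidx & _PTEIDX_SECONDARY)
 *			hash = ~hash;
 *		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *		slot += hidx & _PTEIDX_GROUP_IX;
 *		...
 *	} pte_iterate_hashed_end();
 */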

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}
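
/*
 * A usage sketch (the calling context is hypothetical): an mmap handler
 * exposing a single 4K hardware page to userspace might do
 *
 *	return hash__remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
 *
 * H_PAGE_4K_PFN then marks the PTE as mapping one 4K page rather than a
 * 64K page.
 */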

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
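
/*
 * Worked sizes, assuming 8-byte table entries and the 10-bit PMD/PUD and
 * 8-bit PGD index sizes above: H_PMD_TABLE_SIZE is 8K of pmd entries plus
 * 8K of stashed pgtable_t pointers (16K) when THP or hugetlb is enabled,
 * and 8K otherwise; the PUD table follows the same pattern, and
 * H_PGD_TABLE_SIZE is 8 << 8 = 2K.
 */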

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hash index (hidx) is stored in the pgtable whose address
	 * is in the second half of the PMD.
	 *
	 * Order this load against the test for pmd_trans_huge() in the
	 * caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The Linux hugepage PMD now includes the pmd entries followed by the address
 * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
 * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use one byte for
 * each HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries,
 * and with 4K HPTEs we need 4096 entries. Both fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. These memory locations
 * are also used as normal page PTE pointers. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure
 * the _PAGE_PRESENT bit of that is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
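
/*
 * Worked example of the encoding: recording HPTE slot 0xb (secondary bit
 * set, group index 3) at index 5 with mark_hpte_slot_valid() stores
 * (0xb << 1) | 0x1 = 0x17; hpte_valid() then reads back 1 and
 * hpte_hash_index() recovers 0xb.
 */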

/*
 * For core kernel code, by design pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
 * totally separated from the core VM paths, and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge() could
 * run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}
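
/*
 * Illustration of the distinction above: a PMD marked by hash__pmd_mkhuge()
 * carries both _PAGE_PTE and H_PAGE_THP_HUGE, so hash__pmd_trans_huge()
 * returns 1; an explicit hugetlb mapping never sets H_PAGE_THP_HUGE, so
 * the test returns 0 for it.
 */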

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */