/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK         _PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/* Bit to set in a PMD/PUD/PGD entry to mark it valid */
#define HASH_PMD_VAL_BITS               (0x8000000000000000UL)
#define HASH_PUD_VAL_BITS               (0x8000000000000000UL)
#define HASH_PGD_VAL_BITS               (0x8000000000000000UL)

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE    (H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
                                 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE         (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
/*
 * Top 2 bits are ignored in page table walk.
 */
#define EA_MASK                 (~(0xcUL << 60))

/*
 * We store the slot details in the second half of the page table.
 * Increase the PUD-level table size so that hugetlb PTEs can be stored
 * at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX       (H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX       (H_PUD_INDEX_SIZE)
#endif

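/*
 * Each kernel mapping region below spans H_KERN_MAP_SIZE: 512TB with
 * 64K pages, 16TB with 4K pages.
 */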
/*
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |      512TB/16TB of vmemmap   |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      512TB/16TB of IO map    |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      512TB/16TB of vmap      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define H_VMALLOC_START         H_KERN_VIRT_START
#define H_VMALLOC_SIZE          H_KERN_MAP_SIZE
#define H_VMALLOC_END           (H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START         H_VMALLOC_END
#define H_KERN_IO_SIZE          H_KERN_MAP_SIZE
#define H_KERN_IO_END           (H_KERN_IO_START + H_KERN_IO_SIZE)

#define H_VMEMMAP_START         H_KERN_IO_END
#define H_VMEMMAP_SIZE          H_KERN_MAP_SIZE
#define H_VMEMMAP_END           (H_VMEMMAP_START + H_VMEMMAP_SIZE)

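/*
 * The non-linear regions each span 1 << REGION_SHIFT bytes
 * (H_KERN_MAP_SIZE) starting at H_KERN_VIRT_START, so an address's
 * region index is its offset from H_KERN_VIRT_START shifted down by
 * REGION_SHIFT, biased by 2 to leave 0 for user space and 1 for the
 * linear map.
 */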
#define NON_LINEAR_REGION_ID(ea)        ((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)

/*
 * Region IDs
 */
#define USER_REGION_ID          0
#define LINEAR_MAP_REGION_ID    1
#define VMALLOC_REGION_ID       NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID            NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID       NON_LINEAR_REGION_ID(H_VMEMMAP_START)

/*
 * With mm slices, the architecture provides its own
 * arch_get_unmapped_area{,_topdown}() implementations.
 */
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/*
 * PTEIDX nibble: bit 3 selects the secondary hash bucket, bits 0-2 hold
 * the slot index within the 8-entry HPTE group.
 */
#define _PTEIDX_SECONDARY       0x8
#define _PTEIDX_GROUP_IX        0x7

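/*
 * A PMD/PUD entry must point to a naturally aligned lower-level table,
 * so any of these low alignment bits being set marks the entry as bad.
 */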
#define H_PMD_BAD_BITS          (PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS          (PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
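/*
 * Map an effective address to a region ID: a zero top nibble means user
 * space, kernel addresses below H_KERN_VIRT_START are the linear map,
 * and anything above decodes via NON_LINEAR_REGION_ID().
 */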
static inline int get_region_id(unsigned long ea)
{
        int region_id;
        int id = (ea >> 60UL);

        if (id == 0)
                return USER_REGION_ID;

        if (ea < H_KERN_VIRT_START)
                return LINEAR_MAP_REGION_ID;

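        /* The first non-linear region must be vmalloc (region ID 2). */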
        BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);

        region_id = NON_LINEAR_REGION_ID(ea);
        return region_id;
}

#define hash__pmd_bad(pmd)              (pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)              (pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
        return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

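/*
 * Called after changing a PTE that may be cached in the hash table, so
 * the stale hash entry can be invalidated (the flush may be batched).
 */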
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
                                         unsigned long addr,
                                         pte_t *ptep, unsigned long clr,
                                         unsigned long set,
                                         int huge)
{
        __be64 old_be, tmp_be;
        unsigned long old;

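        /*
         * ldarx/stdcx. retry loop: wait for H_PAGE_BUSY to clear, then
         * atomically clear the 'clr' bits and set the 'set' bits. PTEs
         * are stored big-endian, hence the cpu_to_be64() conversions.
         */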
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3         # pte_update\n\
        and.    %1,%0,%6\n\
        bne-    1b \n\
        andc    %1,%0,%4 \n\
        or      %1,%1,%7\n\
        stdcx.  %1,0,%3 \n\
        bne-    1b"
        : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
        : "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
          "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );
        /* huge pages use the old page table lock */
        if (!huge)
                assert_pte_locked(mm, addr);

        old = be64_to_cpu(old_be);
        if (old & H_PAGE_HASHPTE)
                hpte_need_flush(mm, addr, ptep, old, huge);

        return old;
}

/* Set the dirty and/or accessed bits atomically in a Linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
        __be64 old, tmp, val, mask;

        mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
                           _PAGE_EXEC | _PAGE_SOFT_DIRTY);

        val = pte_raw(entry) & mask;

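        /*
         * Same H_PAGE_BUSY protocol as hash__pte_update(): wait for the
         * busy bit to clear, then OR in the new access bits atomically.
         */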
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%4\n\
                and.    %1,%0,%6\n\
                bne-    1b \n\
                or      %0,%3,%0\n\
                stdcx.  %0,0,%4\n\
                bne-    1b"
        :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
        :"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
        :"cc");
}

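/*
 * PTE comparison and emptiness checks ignore the software HPTE tracking
 * bits (_PAGE_HPTEFLAGS), which can differ without changing the mapping.
 */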
static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
        return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
        return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

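/*
 * Return the global hash table slot for @vpn, using the slot details
 * saved in @rpte for the given subpage index.
 */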
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
                int ssize, real_pte_t rpte, unsigned int subpg_index);

/* This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors, so it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it in
 * one place rather than spreading it around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte, int percpu)
{
        /*
         * Just store the PTE normally; that covers all 64-bit cases,
         * and 32-bit non-hash with 32-bit PTEs.
         */
        *ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                                   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
                                          unsigned long addr, pmd_t *pmdp,
                                          unsigned long old_pmd)
{
        WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                              unsigned long page_size,
                                              unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
                                     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */