/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
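/*
 * Serializes global tlbie broadcasts on CPUs that lack the
 * MMU_FTR_LOCKLESS_TLBIE feature; taken by the lock_tlbie paths below.
 */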
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
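/*
 * The tlbiel/tlbie forms below use the new radix (ISA 3.0) operand
 * layout and are hand-encoded with .long, presumably so they assemble
 * even without mnemonic support for the new forms.
 */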
static inline void __tlbiel_pid(unsigned long pid, int set)
{
	unsigned long rb, rs, ric, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */
	ric = 2; /* invalidate all the caches */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}
/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid)
{
	int set;

	for (set = 0; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set);
}
static inline void _tlbie_pid(unsigned long pid)
{
	unsigned long rb, rs, ric, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */
	ric = 2; /* invalidate all the caches */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
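/*
 * Completion rules: a core-local tlbiel only needs the trailing ptesync,
 * while a global tlbie must be followed by eieio; tlbsync; ptesync before
 * the invalidation can be relied upon on all processors.
 */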
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap)
{
	unsigned long rb, rs, ric, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */
	ric = 0; /* no cluster flush yet */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap)
{
	unsigned long rb, rs, ric, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */
	ric = 0; /* no cluster flush yet */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
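/*
 * Illustrative usage (a sketch, not code from this file): after a PTE
 * for 'vma' at 'addr' has been modified or cleared, the stale
 * translation is dropped with
 *
 *	radix__flush_tlb_page(vma, addr);
 *
 * and all translations for an address space with
 *
 *	radix__flush_tlb_mm(mm);
 */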
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid);
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				  unsigned long ap, int nid)
{
	unsigned long pid = mm ? mm->context.id : 0;

	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap);
}
void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
				     mmu_get_ap(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
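/*
 * mm_is_core_local() below is true when the mm has only been active on
 * hardware threads of the current core; in that case a local tlbiel
 * flush suffices and the global tlbie (and its lock) can be skipped.
 */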
#ifdef CONFIG_SMP
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;
	if (!mm_is_core_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid);
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    unsigned long ap, int nid)
{
	unsigned long pid = mm ? mm->context.id : 0;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;
	if (!mm_is_core_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap);
}
void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_ap(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(radix__flush_tlb_page);
#endif /* CONFIG_SMP */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
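/*
 * radix__tlb_flush() is the mmu_gather flush hook; when a gather is
 * finished it simply flushes the whole mm, matching the coarse
 * range-flush policy above.
 */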
void radix__tlb_flush(struct mmu_gather *tlb)
{
	struct mm_struct *mm = tlb->mm;

	radix__flush_tlb_mm(mm);
}