// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
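/*
 * HPTE_LOCK_BIT is a software-use bit in the low-order bits of the HPTE's
 * first doubleword, used here as a per-entry lock. HPTEs are stored
 * big-endian, so on little-endian kernels the same physical bit is
 * addressed as bit 56+3 by the generic bitops, which operate on the
 * native 64-bit word.
 */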
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
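/*
 * native_tlbie_lock serializes global tlbie on CPUs without
 * MMU_FTR_LOCKLESS_TLBIE; see tlbie() and native_flush_hash_range().
 */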
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

	asm volatile("tlbiel %0" : : "r" (rb));
/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
	unsigned int r = 0; /* hash format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
	asm volatile("ptesync": : :"memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa206(set, is);

	asm volatile("ptesync": : :"memory");
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	/*
	 * Now invalidate the process table cache.
	 *
	 * From ISA v3.0B p. 1078:
	 *     The following forms are invalid.
	 *     * PRS=1, R=0, and RIC!=2 (The only process-scoped
	 *       HPT caching is of the Process Table.)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	asm volatile("ptesync": : :"memory");
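	/* Also flush the ERAT (the effective-to-real address translation cache). */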
	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
void hash__tlbiel_all(unsigned int action)
	case TLB_INVAL_SCOPE_GLOBAL:
	case TLB_INVAL_SCOPE_LPID:

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		tlbiel_all_isa206(POWER8_TLB_SETS, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
		tlbiel_all_isa206(POWER7_TLB_SETS, is);
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
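/*
 * ___tlbie() builds the tlbie operand (effective address bits plus the
 * segment and page size encodings) from the VPN and issues a global
 * tlbie. It returns the operand so callers can trace it (__tlbie()) or
 * reissue the flush (fixup_tlbie()).
 */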
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
				     int apsize, int ssize)
	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64-bit va (non-SLS segment).
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);
	/* clear out bits after (52) [0....52.....63] */
	va &= ~((1ul << (64 - 52)) - 1);
	sllp = get_sllp_encoding(apsize);
	asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
		     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
		     : "memory");
	/* We need 14 to 14 + i bits of va */
	penc = mmu_psize_defs[psize].penc[apsize];
	va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
	/*
	 * We don't need all the bits, but the rest of the bits
	 * must be ignored by the processor.
	 * vpn covers up to 65 bits of va (0...65) and we need
	 * 58..64 bits of va.
	 */
	va |= (vpn & 0xfe); /* AVAL */
	asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
		     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
		     : "memory");
static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64-bit va (non-SLS segment).
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);
	/* clear out bits after (52) [0....52.....63] */
	va &= ~((1ul << (64 - 52)) - 1);
	sllp = get_sllp_encoding(apsize);
	asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
		     : : "r" (va), "i" (CPU_FTR_ARCH_206)
		     : "memory");
	/* We need 14 to 14 + i bits of va */
	penc = mmu_psize_defs[psize].penc[apsize];
	va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
	/*
	 * We don't need all the bits, but the rest of the bits
	 * must be ignored by the processor.
	 * vpn covers up to 65 bits of va (0...65) and we need
	 * 58..64 bits of va.
	 */
	asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
		     : : "r" (va), "i" (CPU_FTR_ARCH_206)
		     : "memory");

	trace_tlbie(0, 1, va, 0, 0, 0, 0);
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie(vpn, psize, apsize, ssize);
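		/*
		 * eieio/tlbsync/ptesync: wait for the broadcast tlbie to be
		 * performed by all processors before we continue.
		 */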
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
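/*
 * Per-HPTE locking: native_lock_hpte() spins with plain test_bit() reads
 * until the lock bit looks clear and only then retries the atomic
 * test_and_set_bit_lock(), which keeps the HPTE cacheline shared while
 * waiting.
 */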
static inline void native_lock_hpte(struct hash_pte *hptep)
	unsigned long *word = (unsigned long *)&hptep->v;

		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
		while (test_bit(HPTE_LOCK_BIT, word))

static inline void native_unlock_hpte(struct hash_pte *hptep)
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
			native_unlock_hpte(hptep);

	if (i == HPTES_PER_GROUP)
	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");
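	/*
	 * The low 3 bits of the return value are the slot within the group;
	 * bit 3 flags a secondary-hash insertion (e.g. slot 5 in the
	 * secondary group is returned as 0xd).
	 */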
	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
static long native_hpte_remove(unsigned long hpte_group)
	struct hash_pte *hptep;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
			native_unlock_hpte(hptep);

	if (i == HPTES_PER_GROUP)

	/* Invalidate the hpte. NOTE: this also unlocks it */
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
			native_unlock_hpte(hptep);

	if (flags & HPTE_LOCAL_UPDATE)
	/*
	 * Ensure it is out of the TLB too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
	struct hash_pte *hptep;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;
	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the TLB too. A bolted entry's base and
	 * actual page sizes will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			native_unlock_hpte(hptep);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);
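	/*
	 * A PMD-sized (16MB) hugepage can be backed by up to one HPTE per
	 * base-page-sized chunk, so walk every slot hint recorded in
	 * hpte_slot_array and tear down each valid entry individually.
	 */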
	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);

		hidx = hpte_hash_index(hpte_slot_array, i);

		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/*
				 * Invalidate the hpte. NOTE: this also unlocks it
				 */

			native_unlock_hpte(hptep);

		/*
		 * We need to do a TLB invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);

	local_irq_restore(flags);
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
	WARN(1, "%s called without THP support\n", __func__);
#endif
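/*
 * hpte_decode(): given a valid HPTE, recover the base and actual page
 * sizes, the segment size and the VPN it maps. native_hpte_clear() uses
 * this because at kexec time the hardware hash table is all it has to go
 * on.
 */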
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);

	if (!(hpte_v & HPTE_V_LARGE)) {
		a_size = MMU_PAGE_4K;
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;

	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		/* We can find more bits from the pteg value */
		vpi = (vsid ^ pteg) & htab_hash_mask;
		seg_off |= vpi << shift;
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
		seg_off |= vpi << shift;
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
/*
 * clear all mappings on kexec. All CPUs are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, when all bets are off anyway.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);

			___tlbie(vpn, psize, apsize, ssize);

	asm volatile("eieio; tlbsync; ptesync":::"memory");
/*
 * Batched hash table flush. We batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
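/*
 * The flow is: tear down each matching HPTE in the batch under its
 * per-HPTE lock, then flush the TLB either locally with tlbiel (when the
 * batch is local, tlbiel is usable for the page size and no CXL contexts
 * are active) or globally with tlbie, taking native_tlbie_lock on CPUs
 * that need tlbie serialized.
 */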
static void native_flush_hash_range(unsigned long number, int local)
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		    mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);
	for (i = 0; i < number; i++) {
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				/* lock and try again */
				native_lock_hpte(hptep);
				hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
		} pte_iterate_hashed_end();
	asm volatile("ptesync":::"memory");
	for (i = 0; i < number; i++) {
		pte_iterate_hashed_subpages(pte, psize,
					    vpn, index, shift) {
			__tlbiel(vpn, psize, psize, ssize);
		} pte_iterate_hashed_end();

	asm volatile("ptesync":::"memory");

	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	for (i = 0; i < number; i++) {
		pte_iterate_hashed_subpages(pte, psize,
					    vpn, index, shift) {
			__tlbie(vpn, psize, psize, ssize);
		} pte_iterate_hashed_end();

	/*
	 * Just do one more with the last used values.
	 */
	fixup_tlbie(vpn, psize, psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
void __init hpte_init_native(void)
	mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert = native_hpte_insert;
	mmu_hash_ops.hpte_remove = native_hpte_remove;
	mmu_hash_ops.hpte_clear_all = native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;