asedeno.scripts.mit.edu Git - linux.git/commitdiff
powerpc/64s/radix: prefetch user address in update_mmu_cache
Author: Nicholas Piggin <npiggin@gmail.com>
Fri, 1 Jun 2018 10:01:18 +0000 (20:01 +1000)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Sun, 3 Jun 2018 10:40:35 +0000 (20:40 +1000)
Prefetch the faulting address in update_mmu_cache to give the page
table walker perhaps a 100-cycle head start as locks are dropped and
the interrupt is completed.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable-book3s64.c

index c3c39b02b2ba7b0ae58df5bf293103b382b2b708..8cecda4bd66ae43d79f1c16beb4e229ecd26cc92 100644 (file)
@@ -509,8 +509,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
         */
        unsigned long access, trap;
 
-       if (radix_enabled())
+       if (radix_enabled()) {
+               prefetch((void *)address);
                return;
+       }
 
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
index 82fed87289de83dd5f8ab083c426c6177c5b120b..c1f4ca45c93a488df07d66525f0d935ca342f84c 100644 (file)
@@ -152,7 +152,8 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
 {
-       return;
+       if (radix_enabled())
+               prefetch((void *)addr);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */