powerpc/mm/radix: Avoid flushing the PWC on every flush_tlb_range
author Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 19 Jul 2017 04:49:06 +0000 (14:49 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 2 Aug 2017 03:11:06 +0000 (13:11 +1000)
We flush the PWC (page walk cache) on every flush_tlb_range() only
because that path is used by THP pmd collapsing, so use a dedicated
flush function for that case instead.
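
For orientation (illustrative recap, not from the commit itself): the RIC
field of the radix tlbie/tlbiel instructions selects how much translation
state is invalidated, and the patch narrows the scope used on the common
range-flush path:

    /*
     * Illustrative recap only -- RIC scopes as named in tlb-radix.c:
     *
     *   RIC_FLUSH_TLB -- TLB entries for the PID only
     *   RIC_FLUSH_PWC -- page walk cache entries for the PID only
     *   RIC_FLUSH_ALL -- both TLB entries and the PWC
     *
     * Before: flush_tlb_range()   -> radix__flush_all_mm()  (RIC_FLUSH_ALL)
     * After:  flush_tlb_range()   -> radix__flush_tlb_mm()  (RIC_FLUSH_TLB)
     *         THP pmd collapse    -> radix__flush_tlb_collapsed_pmd()
     *                                (one PWC flush + per-page TLB flushes)
     */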

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/tlb-radix.c

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 7196999cdc8292310e2ef941bdc46855df75a77b..9b433a624bf3620d46cd97e6b8e563abce691169 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -36,6 +36,7 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
 #endif
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
 extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                                     unsigned long page_size);
 extern void radix__flush_tlb_lpid(unsigned long lpid);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 5cc50d47ce3f99f4bf331a4b6a4c4be18c46379e..5d05245208ee38703a806dbb8eae8ce6fc2e3da2 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -804,9 +804,12 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
         */
        pmd = *pmdp;
        pmd_clear(pmdp);
+
        /*FIXME!!  Verify whether we need this kick below */
        kick_all_cpus_sync();
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+       radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
+
        return pmd;
 }
 
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 28f339cdd8360243b8164937524dca2f7ad91319..18151e9ad694625179fae9161024600e9bc53a7b 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -272,11 +272,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm = vma->vm_mm;
 
-       /*
-        * This is currently used when collapsing THPs so we need to
-        * flush the PWC. We should fix this.
-        */
-       radix__flush_all_mm(mm);
+       radix__flush_tlb_mm(mm);
 }
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
@@ -355,6 +351,45 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
        preempt_enable();
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
+{
+       int local = mm_is_thread_local(mm);
+       unsigned long ap = mmu_get_ap(mmu_virtual_psize);
+       unsigned long pid, end;
+
+
+       pid = mm ? mm->context.id : 0;
+       preempt_disable();
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       /* 4k page size, just blow the world */
+       if (PAGE_SIZE == 0x1000) {
+               radix__flush_all_mm(mm);
+               preempt_enable();
+               return;
+       }
+
+       /* Otherwise first do the PWC */
+       if (local)
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+       else
+               _tlbie_pid(pid, RIC_FLUSH_PWC);
+
+       /* Then iterate the pages */
+       end = addr + HPAGE_PMD_SIZE;
+       for (; addr < end; addr += PAGE_SIZE) {
+               if (local)
+                       _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+               else
+                       _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+       }
+no_context:
+       preempt_enable();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
 {
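
The "4k page size, just blow the world" shortcut in the new function comes
down to simple arithmetic: a collapsed PMD covers HPAGE_PMD_SIZE, so the
per-page loop would need many more tlbie(l) operations with a 4K base page
size than with 64K. A standalone back-of-the-envelope model (plain
userspace C, not kernel code; the 2MB PMD huge-page size is radix's and is
assumed here for both base page sizes):

    #include <stdio.h>

    int main(void)
    {
            /* 2MB: the PMD huge-page size radix uses with 4K and 64K base pages */
            const unsigned long hpage_pmd_size = 2UL << 20;
            const unsigned long base_pages[] = { 0x1000, 0x10000 };

            for (unsigned int i = 0; i < 2; i++)
                    printf("%3luK base pages: %3lu per-page tlbie(l) ops per collapsed PMD\n",
                           base_pages[i] >> 10, hpage_pmd_size / base_pages[i]);
            return 0;
    }

With 64K pages that is 32 invalidations per collapse, but with 4K pages it
would be 512, which is presumably why the 4K case falls back to
radix__flush_all_mm() instead of iterating.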