csky: Enable defer flush_dcache_page for abiv2 cpus (807/810/860)
author		Guo Ren <guoren@linux.alibaba.com>
		Sun, 26 Jan 2020 17:20:36 +0000 (01:20 +0800)
committer	Guo Ren <guoren@linux.alibaba.com>
		Fri, 21 Feb 2020 07:43:24 +0000 (15:43 +0800)
Instead of flushing the cache on every update_mmu_cache() call, we use
flush_dcache_page() to reduce the frequency of cache flushing.

As abiv2 cpus are all PIPT for icache & dcache, we needn't handle the
dcache aliasing problem. But their icache can't snoop the dcache, so we
still need to sync the icache with the dcache in update_mmu_cache().

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
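
For reference, a minimal sketch (assumed, not part of this commit) of the
writer-side pattern that drives this path; the helper name and call
sequence are illustrative only:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Hypothetical writer: dirty a user-visible page through its kernel
 * mapping, then call flush_dcache_page().  With this commit, abiv2
 * only clears PG_dcache_clean here; the real dcache writeback (and
 * the icache invalidate for VM_EXEC mappings) is deferred until
 * update_mmu_cache() maps the page into userspace.
 */
static void example_dirty_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);

	/* Defer: just mark the page's dcache as dirty. */
	flush_dcache_page(page);
}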
arch/csky/abiv2/cacheflush.c
arch/csky/abiv2/inc/abi/cacheflush.h

index f64b415f6fdef98cf759c23e8d71fd6265c990bb..ba469953a16e6ee9daf879aba2faa34e8baada27 100644 (file)
@@ -9,20 +9,22 @@
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *pte)
 {
-       unsigned long addr, pfn;
+       unsigned long addr;
        struct page *page;
 
-       pfn = pte_pfn(*pte);
-       if (unlikely(!pfn_valid(pfn)))
+       page = pfn_to_page(pte_pfn(*pte));
+       if (page == ZERO_PAGE(0))
                return;
 
-       page = pfn_to_page(pfn);
-       if (page == ZERO_PAGE(0))
+       if (test_and_set_bit(PG_dcache_clean, &page->flags))
                return;
 
        addr = (unsigned long) kmap_atomic(page);
 
-       cache_wbinv_range(addr, addr + PAGE_SIZE);
+       dcache_wb_range(addr, addr + PAGE_SIZE);
+
+       if (vma->vm_flags & VM_EXEC)
+               icache_inv_range(addr, addr + PAGE_SIZE);
 
        kunmap_atomic((void *) addr);
 }
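
A note on the hunk above: test_and_set_bit() both re-marks the page clean
and reports, atomically, whether a flush is still owed, so concurrent
faults on the same page flush at most once per flush_dcache_page() call.
The VM_EXEC check keeps the icache invalidate off the data-only fast
path, which appears to be the point of splitting the old
cache_wbinv_range() into dcache_wb_range() plus a conditional
icache_inv_range().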
index 62a9031fffd8591cf0473e621ef629be130be29b..acd7c6c55d61532f42e55dcb4a240b4dd5c62c65 100644 (file)
 #define flush_cache_dup_mm(mm)                 do { } while (0)
 #define flush_cache_range(vma, start, end)     do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do { } while (0)
+
+#define PG_dcache_clean                PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+       if (test_bit(PG_dcache_clean, &page->flags))
+               clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 #define flush_icache_page(vma, page)           do { } while (0)
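
A note on flush_dcache_page() above: checking test_bit() before
clear_bit() presumably avoids an atomic read-modify-write (and the
resulting cacheline dirtying) when PG_dcache_clean is already clear,
keeping repeated calls on an already-dirty page cheap.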