arm64: Convert __inval_cache_range() to area-based
author    Robin Murphy <robin.murphy@arm.com>
          Tue, 25 Jul 2017 10:55:39 +0000 (11:55 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
          Wed, 9 Aug 2017 10:00:23 +0000 (11:00 +0100)
__inval_cache_range() is already the odd one out among our data cache
maintenance routines as the only remaining range-based one; as we're
going to want an invalidation routine to call from C code for the pmem
API, let's tweak the prototype and name to bring it in line with the
clean operations, and to make its relationship with __dma_inv_area()
neatly mirror that of __clean_dcache_area_poc() and __dma_clean_area().
The loop clearing the early page tables gets mildly massaged in the
process for the sake of consistency.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
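
In calling-convention terms, the change described above boils down to passing a length instead of an end address. A minimal before/after sketch in C, using stand-in prototypes for the two assembly routines (the wrapper functions are illustrative, not part of the patch):

#include <stddef.h>

/* Stand-in declarations for the routines in arch/arm64/mm/cache.S. */
void __inval_cache_range(unsigned long start, unsigned long end); /* old */
void __inval_dcache_area(void *addr, size_t len);                 /* new */

/* Old convention: the caller derives an end address from the length. */
static void inval_before(void *addr, size_t len)
{
	__inval_cache_range((unsigned long)addr, (unsigned long)addr + len);
}

/* New convention: pass the length directly, matching the signature of
 * __clean_dcache_area_poc(void *addr, size_t len). */
static void inval_after(void *addr, size_t len)
{
	__inval_dcache_area(addr, len);
}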
arch/arm64/include/asm/cacheflush.h
arch/arm64/kernel/head.S
arch/arm64/mm/cache.S

arch/arm64/include/asm/cacheflush.h
index 4d4f650c290ea392e0624ebb72c9a223dc16e76b..b4b43a94dffd98ae1f93ee0806595eda5ea8ca19 100644
@@ -67,6 +67,7 @@
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
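
With the declaration exported above, C code can reach the invalidate routine directly. A hedged sketch of the kind of pmem-API caller the commit message anticipates (the function name arch_invalidate_pmem() is an assumption about the eventual consumer, not part of this patch):

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustrative only: forward a pmem invalidation request straight to
 * the new area-based routine. */
static void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}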
arch/arm64/kernel/head.S
index 973df7de7bf8d3e4fcde6dc2ba1590fa051964a2..73a0531e0187104a53aee86760f0ea3de3110fd1 100644
@@ -143,8 +143,8 @@ preserve_boot_args:
        dmb     sy                              // needed before dc ivac with
                                                // MMU off
 
-       add     x1, x0, #0x20                   // 4 x 8 bytes
-       b       __inval_cache_range             // tail call
+       mov     x1, #0x20                       // 4 x 8 bytes
+       b       __inval_dcache_area             // tail call
 ENDPROC(preserve_boot_args)
 
 /*
@@ -221,20 +221,20 @@ __create_page_tables:
         * dirty cache lines being evicted.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
-       bl      __inval_cache_range
+       ldr     x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+       bl      __inval_dcache_area
 
        /*
         * Clear the idmap and swapper page tables.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+       ldr     x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 1:     stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
-       cmp     x0, x6
-       b.lo    1b
+       subs    x1, x1, #64
+       b.ne    1b
 
        mov     x7, SWAPPER_MM_MMUFLAGS
 
@@ -307,9 +307,9 @@ __create_page_tables:
         * tables again to remove any speculatively loaded cache lines.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+       ldr     x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
        dmb     sy
-       bl      __inval_cache_range
+       bl      __inval_dcache_area
 
        ret     x28
 ENDPROC(__create_page_tables)
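
The reworked clearing loop above counts a byte total down to zero instead of comparing against an end pointer. In C it behaves roughly as follows, a sketch assuming (as the assembly does) that size is a non-zero multiple of 64:

#include <stdint.h>
#include <string.h>

/* Mirrors the stp/subs/b.ne loop: four stp xzr, xzr stores zero
 * 64 bytes per iteration, then the remaining byte count is
 * decremented until it reaches zero. */
static void clear_early_page_tables(uint64_t *base, uint64_t size)
{
	do {
		memset(base, 0, 64);	/* 4 x stp xzr, xzr, [x0], #16 */
		base += 8;		/* 64 bytes == 8 doublewords */
		size -= 64;		/* subs x1, x1, #64 */
	} while (size != 0);		/* b.ne 1b */
}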
arch/arm64/mm/cache.S
index 83c27b6e6dca31007d3b9ae5bc1c48d66456e757..ed47fbbb4b05783e15174885d5dc90f2fb43cf98 100644
@@ -109,20 +109,25 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)
 
 /*
- *     __dma_inv_area(start, size)
- *     - start   - virtual start address of region
+ *     __inval_dcache_area(kaddr, size)
+ *
+ *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     are invalidated. Any partial lines at the ends of the interval are
+ *     also cleaned to PoC to prevent data loss.
+ *
+ *     - kaddr   - kernel address
  *     - size    - size in question
  */
-__dma_inv_area:
-       add     x1, x1, x0
+ENTRY(__inval_dcache_area)
        /* FALLTHROUGH */
 
 /*
- *     __inval_cache_range(start, end)
- *     - start   - start address of region
- *     - end     - end address of region
+ *     __dma_inv_area(start, size)
+ *     - start   - virtual start address of region
+ *     - size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+       add     x1, x1, x0
        dcache_line_size x2, x3
        sub     x3, x2, #1
        tst     x1, x3                          // end cache line aligned?
@@ -140,7 +145,7 @@ ENTRY(__inval_cache_range)
        b.lo    2b
        dsb     sy
        ret
-ENDPIPROC(__inval_cache_range)
+ENDPIPROC(__inval_dcache_area)
 ENDPROC(__dma_inv_area)
 
 /*
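
The comment block added to __inval_dcache_area() above spells out the partial-line behaviour. In C the fall-through pair behaves roughly as below, a standalone sketch that assumes a fixed 64-byte line (the real code reads the line size from CTR_EL0 via dcache_line_size) and models the dc instructions as printouts:

#include <stdint.h>
#include <stdio.h>

#define LINE 64UL	/* assumed D-cache line size */

/* Mirrors __inval_dcache_area() falling through into __dma_inv_area():
 * partial lines at either end are cleaned and invalidated (dc civac)
 * so that unrelated data sharing those lines is not lost; whole lines
 * in between are invalidated only (dc ivac). */
static void inval_dcache_area(uintptr_t kaddr, size_t size)
{
	uintptr_t start = kaddr;
	uintptr_t end = kaddr + size;	/* __dma_inv_area: add x1, x1, x0 */

	if (end & (LINE - 1)) {		/* end cache line aligned? */
		end &= ~(uintptr_t)(LINE - 1);
		printf("dc civac %#lx\n", (unsigned long)end);
	}
	if (start & (LINE - 1)) {	/* start cache line aligned? */
		start &= ~(uintptr_t)(LINE - 1);
		printf("dc civac %#lx\n", (unsigned long)start);
		start += LINE;
	}
	for (; start < end; start += LINE)
		printf("dc ivac  %#lx\n", (unsigned long)start);
	/* the real routine finishes with dsb sy */
}

int main(void)
{
	inval_dcache_area(0x1010, 0x90);	/* misaligned at both ends */
	return 0;
}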