arm64/mm: Pass ttbr1 as a parameter to __enable_mmu()
author Jun Yao <yaojun8558363@gmail.com>
Mon, 24 Sep 2018 13:51:13 +0000 (14:51 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Tue, 25 Sep 2018 14:10:54 +0000 (15:10 +0100)
In subsequent patches we'll use a transient pgd during the primary cpu's
boot process. To make this work while allowing secondary cpus to use the
swapper_pg_dir, we need to pass the relevant TTBR1 pgd as a parameter
to __enable_mmu().

This patch updates __enable_mmu() to take this as a parameter, updating
callsites to pass swapper_pg_dir for now.

There should be no functional change as a result of this patch.

Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
Reviewed-by: James Morse <james.morse@arm.com>
[Mark: simplify assembly, clarify commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/head.S
arch/arm64/kernel/sleep.S
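As an illustration only (not part of the patch), a minimal sketch of a callsite under the new convention, modelled on the secondary_startup hunk below; x0 is assumed to already hold the SCTLR_EL1 value returned by __cpu_setup:

        bl      __cpu_setup                     // leaves the SCTLR_EL1 value for enabling the MMU in x0
        adrp    x1, swapper_pg_dir              // x1 = physical address of the pgd to install in TTBR1_EL1
        bl      __enable_mmu                    // loads TTBR1_EL1 from x1, then turns the MMU on via x0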

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f73b1597b3b44d3d5282373a9c47c..7983ddf0c0e98af6f37a37b2b2133a8ab9190adc 100644
@@ -706,6 +706,7 @@ secondary_startup:
         * Common entry point for secondary CPUs.
         */
        bl      __cpu_setup                     // initialise processor
+       adrp    x1, swapper_pg_dir
        bl      __enable_mmu
        ldr     x8, =__secondary_switched
        br      x8
@@ -748,6 +749,7 @@ ENDPROC(__secondary_switched)
  * Enable the MMU.
  *
  *  x0  = SCTLR_EL1 value for turning on the MMU.
+ *  x1  = TTBR1_EL1 value
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -756,17 +758,16 @@ ENDPROC(__secondary_switched)
  * If it isn't, park the CPU
  */
 ENTRY(__enable_mmu)
-       mrs     x1, ID_AA64MMFR0_EL1
-       ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+       mrs     x2, ID_AA64MMFR0_EL1
+       ubfx    x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
        cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
        b.ne    __no_granule_support
-       update_early_cpu_boot_status 0, x1, x2
-       adrp    x1, idmap_pg_dir
-       adrp    x2, swapper_pg_dir
-       phys_to_ttbr x3, x1
-       phys_to_ttbr x4, x2
-       msr     ttbr0_el1, x3                   // load TTBR0
-       msr     ttbr1_el1, x4                   // load TTBR1
+       update_early_cpu_boot_status 0, x2, x3
+       adrp    x2, idmap_pg_dir
+       phys_to_ttbr x1, x1
+       phys_to_ttbr x2, x2
+       msr     ttbr0_el1, x2                   // load TTBR0
+       msr     ttbr1_el1, x1                   // load TTBR1
        isb
        msr     sctlr_el1, x0
        isb
@@ -823,6 +824,7 @@ __primary_switch:
        mrs     x20, sctlr_el1                  // preserve old SCTLR_EL1 value
 #endif
 
+       adrp    x1, swapper_pg_dir
        bl      __enable_mmu
 #ifdef CONFIG_RELOCATABLE
        bl      __relocate_kernel
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8ef9372af905b01acb38ab8999f142eb770..3e53ffa07994a445a88a40a0cab9532f2298c231 100644
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
        bl      __cpu_setup
        /* enable the MMU early - so we can access sleep_save_stash by va */
+       adrp    x1, swapper_pg_dir
        bl      __enable_mmu
        ldr     x8, =_cpu_resume
        br      x8