arm64: handle 52-bit addresses in TTBR
author     Kristina Martsenko <kristina.martsenko@arm.com>
           Wed, 13 Dec 2017 17:07:18 +0000 (17:07 +0000)
committer  Catalin Marinas <catalin.marinas@arm.com>
           Fri, 22 Dec 2017 17:35:21 +0000 (17:35 +0000)
The top 4 bits of a 52-bit physical address are positioned at bits 2..5
in the TTBR registers. Introduce a couple of macros to move the bits
there, and change all TTBR writers to use them.
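
To illustrate the bit movement, here is a small standalone C sketch that
mirrors the phys_to_ttbr() definition added by this patch (the helper and
mask names match the patch; the test address is made up):

	#include <stdint.h>
	#include <stdio.h>

	/* TTBR bits [47:2] hold the table address; bit 1 is RES0. */
	#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)

	/* PA bits [51:48] shift right by 46 to land in TTBR bits [5:2]. */
	static uint64_t phys_to_ttbr(uint64_t addr)
	{
		return (addr | (addr >> 46)) & TTBR_BADDR_MASK_52;
	}

	int main(void)
	{
		uint64_t pa = (0xfULL << 48) | 0x10000; /* 52-bit PA, 64KB aligned */

		/* prints: pa=0xf000000010000 ttbr=0x1003c */
		printf("pa=%#llx ttbr=%#llx\n", (unsigned long long)pa,
		       (unsigned long long)phys_to_ttbr(pa));
		return 0;
	}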

Leave TTBR0 PAN code unchanged, to avoid complicating it. A system with
52-bit PA will have PAN anyway (because it's ARMv8.1 or later), and a
system without 52-bit PA can only use up to 48-bit PAs. A later patch in
this series will add a kconfig dependency to ensure PAN is configured.

In addition, when using 52-bit PA there is a special alignment
requirement on the top-level table. We don't currently have any VA_BITS
configuration that would violate the requirement, but one could be added
in the future, so add a compile-time BUG_ON to check for it.
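
As a cross-check of that arithmetic (a hedged sketch; pgd_size() here is a
hypothetical stand-in, not a kernel function): descriptors are 8 bytes and
pgds are naturally aligned, so the 64-byte requirement amounts to having at
least eight entries in the top-level table.

	#include <assert.h>

	/* Size in bytes of a naturally aligned top-level table. */
	static unsigned int pgd_size(unsigned int va_bits,
				     unsigned int page_shift,
				     unsigned int levels)
	{
		unsigned int top_bits = va_bits - page_shift
					- (levels - 1) * (page_shift - 3);
		return (1u << top_bits) * 8;	/* entries * 8-byte descriptors */
	}

	int main(void)
	{
		assert(pgd_size(48, 12, 4) == 4096);	/* 4KB pages, 48-bit VA */
		assert(pgd_size(42, 16, 2) == 65536);	/* 64KB pages, 42-bit VA */
		/* Any configuration where this fell below 64 would trip the
		 * new BUILD_BUG_ON in pgd_cache_init(). */
		return 0;
	}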

Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
[catalin.marinas@arm.com: added TTBR_BADDR_MASK_52 comment]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
13 files changed:
arch/arm/include/asm/kvm_mmu.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S
arch/arm64/kernel/hibernate.c
arch/arm64/kvm/hyp-init.S
arch/arm64/mm/pgd.c
arch/arm64/mm/proc.S
virt/kvm/arm/arm.c

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fa6f2174276bdd665519a2cafcaf737e3fda2ce5..8dbec683638b2143e0cb50714058cea6b93c24a7 100644
@@ -221,6 +221,8 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return 8;
 }
 
+#define kvm_phys_to_vttbr(addr)                (addr)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 04a92307e6c1740095c430420f568dccd3b0495b..49ea3def4bd16647d30b799a8d52ee95e8ed7dd4 100644
@@ -530,4 +530,20 @@ alternative_else_nop_endif
 #endif
        .endm
 
+/*
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
+ *
+ *     phys:   physical address, preserved
+ *     ttbr:   returns the TTBR value
+ */
+       .macro  phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+       orr     \ttbr, \phys, \phys, lsr #46
+       and     \ttbr, \ttbr, #TTBR_BADDR_MASK_52
+#else
+       mov     \ttbr, \phys
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2a796fadae762846c1f314016c7c3..747bfff92948dc5503900d9242764d17f1d49afc 100644
@@ -309,5 +309,7 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9d155fa9a50791af293916cfdc1ede087f850c6d..accc2ff32a0e8eb6d896df23ef4391ddc588bc9d 100644
@@ -51,7 +51,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-       unsigned long ttbr = __pa_symbol(empty_zero_page);
+       unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
 
        write_sysreg(ttbr, ttbr0_el1);
        isb();
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9be2e9371c525aa3b7826c037c5a06279a48b01d..f92be11a209ab73e8a573eea108c916c3d08d8e2 100644
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
+#include <asm/memory.h>
+
 /*
  * Number of page-table levels required to address 'va_bits' wide
  * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
@@ -306,4 +308,15 @@
 #define TCR_HA                 (UL(1) << 39)
 #define TCR_HD                 (UL(1) << 40)
 
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2).
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52     (((UL(1) << 46) - 1) << 2)
+#endif
+
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 149d05fb9421520bd659b62627941ed36ce46bb3..93677b9db9478bde72a12f8ad3630b8594899a2e 100644
@@ -733,6 +733,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define kc_vaddr_to_offset(v)  ((v) & ~VA_START)
 #define kc_offset_to_vaddr(o)  ((o) | VA_START)
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr)     (addr)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 67e86a0f57ac43edcee10d89bd5db2e050ae1621..0addea3760a682050f0af9dc122573d972a71f9d 100644
@@ -679,8 +679,10 @@ ENTRY(__enable_mmu)
        update_early_cpu_boot_status 0, x1, x2
        adrp    x1, idmap_pg_dir
        adrp    x2, swapper_pg_dir
-       msr     ttbr0_el1, x1                   // load TTBR0
-       msr     ttbr1_el1, x2                   // load TTBR1
+       phys_to_ttbr x1, x3
+       phys_to_ttbr x2, x4
+       msr     ttbr0_el1, x3                   // load TTBR0
+       msr     ttbr1_el1, x4                   // load TTBR1
        isb
        msr     sctlr_el1, x0
        isb
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index e56d848b64661b590e1faf324732842a76d5922b..84f5d52fdddac594567abf6fb15ece310a00c07c 100644
@@ -33,12 +33,14 @@
  * Even switching to our copied tables will cause a changed output address at
  * each stage of the walk.
  */
-.macro break_before_make_ttbr_switch zero_page, page_table
-       msr     ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+       phys_to_ttbr \zero_page, \tmp
+       msr     ttbr1_el1, \tmp
        isb
        tlbi    vmalle1
        dsb     nsh
-       msr     ttbr1_el1, \page_table
+       phys_to_ttbr \page_table, \tmp
+       msr     ttbr1_el1, \tmp
        isb
 .endm
 
@@ -78,7 +80,7 @@ ENTRY(swsusp_arch_suspend_exit)
         * We execute from ttbr0, change ttbr1 to our copied linear map tables
         * with a break-before-make via the zero page
         */
-       break_before_make_ttbr_switch   x5, x0
+       break_before_make_ttbr_switch   x5, x0, x6
 
        mov     x21, x1
        mov     x30, x2
@@ -109,7 +111,7 @@ ENTRY(swsusp_arch_suspend_exit)
        dsb     ish             /* wait for PoU cleaning to finish */
 
        /* switch to the restored kernels page tables */
-       break_before_make_ttbr_switch   x25, x21
+       break_before_make_ttbr_switch   x25, x21, x6
 
        ic      ialluis
        dsb     ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 3009b8b80f08043e99802e8623022c35398f02e4..efbf6dbd93c87aa757f45f0962bf74ac870b0a43 100644
@@ -264,7 +264,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
-       write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+       write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
        isb();
 
        *phys_dst_addr = virt_to_phys((void *)dst);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index e2d1fe03662a5c5018a2a981104a3d94f6bd4213..f9681cc0097350668467da80a37b697d3b00d443 100644
@@ -63,7 +63,8 @@ __do_hyp_init:
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc
 
-       msr     ttbr0_el2, x0
+       phys_to_ttbr x0, x4
+       msr     ttbr0_el2, x4
 
        mrs     x4, tcr_el1
        ldr     x5, =TCR_EL2_MASK
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 051e71ec3335edc316817602a51d8676678c1a0f..289f9113a27a0b07468ba5dca92ebb8f25e3e6ba 100644
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
        if (PGD_SIZE == PAGE_SIZE)
                return;
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+       /*
+        * With 52-bit physical addresses, the architecture requires the
+        * top-level table to be aligned to at least 64 bytes.
+        */
+       BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
        /*
         * Naturally aligned pgds required by the architecture.
         */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4f133cb340dc1b590e4b6874b12a2b14d3f522b2..e79db5a7576a5bca8d27f566c0a504e86cdb9637 100644
@@ -138,10 +138,11 @@ ENDPROC(cpu_do_resume)
  *     - pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-       pre_ttbr0_update_workaround x0, x2, x3
+       phys_to_ttbr x0, x2
+       pre_ttbr0_update_workaround x2, x3, x4
        mmid    x1, x1                          // get mm->context.id
-       bfi     x0, x1, #48, #16                // set the ASID
-       msr     ttbr0_el1, x0                   // set TTBR0
+       bfi     x2, x1, #48, #16                // set the ASID
+       msr     ttbr0_el1, x2                   // set TTBR0
        isb
        post_ttbr0_update_workaround
        ret
@@ -158,14 +159,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
        save_and_disable_daif flags=x2
 
        adrp    x1, empty_zero_page
-       msr     ttbr1_el1, x1
+       phys_to_ttbr x1, x3
+       msr     ttbr1_el1, x3
        isb
 
        tlbi    vmalle1
        dsb     nsh
        isb
 
-       msr     ttbr1_el1, x0
+       phys_to_ttbr x0, x3
+       msr     ttbr1_el1, x3
        isb
 
        restore_daif x2
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 6b60c98a6e2294c773eb20ea4794445a667415ea..c8d49879307f75eac9f527fd2343516ecb098d42 100644
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
        pgd_phys = virt_to_phys(kvm->arch.pgd);
        BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
        vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-       kvm->arch.vttbr = pgd_phys | vmid;
+       kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
        spin_unlock(&kvm_vmid_lock);
 }