arm64: mm: introduce 52-bit userspace support
author Steve Capper <steve.capper@arm.com>
Thu, 6 Dec 2018 22:50:41 +0000 (22:50 +0000)
committer Will Deacon <will.deacon@arm.com>
Mon, 10 Dec 2018 18:42:17 +0000 (18:42 +0000)
On arm64 there is optional support for a 52-bit virtual address space.
To exploit this, one has to be running with a 64KB page size on hardware
that supports it.

For an arm64 kernel supporting a 48-bit VA with a 64KB page size, two
changes are needed to support a 52-bit userspace:
 * TCR_EL1.T0SZ needs to be 12 instead of 16,
 * TASK_SIZE needs to reflect the new size.
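
The T0SZ value is simply 64 minus the number of virtual address bits
covered by TTBR0, so the two figures above fall out directly. A minimal
sketch of the arithmetic (va_bits_to_t0sz() is an illustrative name, not
part of the patch):

    /* T0SZ = 64 - VA bits, cf. TCR_T0SZ() in asm/pgtable-hwdef.h */
    static inline unsigned int va_bits_to_t0sz(unsigned int va_bits)
    {
            return 64 - va_bits;    /* 48 -> 16, 52 -> 12 */
    }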

This patch implements the above when support for 52-bit VAs is detected
at early boot time.

On arm64, userspace address translation is controlled by TTBR0_EL1. As
well as userspace, TTBR0_EL1 controls:
 * The identity mapping,
 * EFI runtime code.

It is possible to run a kernel with an identity mapping that has a
larger VA size than userspace (and for this case __cpu_set_tcr_t0sz()
would set TCR_EL1.T0SZ as appropriate). However, when the conditions for
52-bit userspace are met, it is possible to keep TCR_EL1.T0SZ fixed at
12. Thus, in this patch, the TCR_EL1.T0SZ size-changing logic is
disabled.
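
For reference, the existing __cpu_set_tcr_t0sz() helper in
asm/mmu_context.h already returns early when no extended idmap is in
use, so forcing __cpu_uses_extended_idmap() to false (see the
mmu_context.h hunk below) is enough to disable the rewrite. A simplified
sketch of that pre-existing helper, which this patch does not modify:

    static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
    {
            if (!__cpu_uses_extended_idmap())
                    return;         /* T0SZ stays fixed at 12 */

            /* ...otherwise rewrite TCR_EL1.T0SZ and synchronise... */
    }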

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/head.S
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d2b25f51bb369439c4311ba6d3fd8505fc58501..858e353b2f40531b31c63d1980b8c480bdd75c49 100644
@@ -716,6 +716,10 @@ config ARM64_PA_BITS_52
 
 endchoice
 
+config ARM64_52BIT_VA
+       def_bool y
+       depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+
 config ARM64_PA_BITS
        int
        default 48 if ARM64_PA_BITS_48
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ba609e0439e8973541dddb62cc2ef7e669ae1d90..122d91d4097a24107a1791980030fb4d945a0790 100644
@@ -357,11 +357,10 @@ alternative_endif
        .endm
 
 /*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
  */
-       .macro  tcr_set_idmap_t0sz, valreg, tmpreg
-       ldr_l   \tmpreg, idmap_t0sz
-       bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+       .macro  tcr_set_t0sz, valreg, t0sz
+       bfi     \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
        .endm
 
 /*
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index dfcfeffd20801cb69ca20ec53113c3e1dfd3479f..b0768502fa0828aacaedd185a7ab1495471b3720 100644
@@ -74,6 +74,9 @@ extern u64 idmap_ptrs_per_pgd;
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
+       if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
+               return false;
+
        return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
 }
 
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 759927faf7f6829d960700f69ee3738ba9f000f9..7ff75e52b76264c4ef87b3fb7dc5626f3f40dd39 100644
 #ifndef __ASM_PROCESSOR_H
 #define __ASM_PROCESSOR_H
 
-#define TASK_SIZE_64           (UL(1) << VA_BITS)
-
-#define KERNEL_DS      UL(-1)
-#define USER_DS                (TASK_SIZE_64 - 1)
+#define KERNEL_DS              UL(-1)
+#ifdef CONFIG_ARM64_52BIT_VA
+#define USER_DS                        ((UL(1) << 52) - 1)
+#else
+#define USER_DS                        ((UL(1) << VA_BITS) - 1)
+#endif /* CONFIG_ARM64_52BIT_VA */
 
 /*
  * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
@@ -56,6 +58,9 @@
 
 #define DEFAULT_MAP_WINDOW_64  (UL(1) << VA_BITS)
 
+extern u64 vabits_user;
+#define TASK_SIZE_64           (UL(1) << vabits_user)
+
 #ifdef CONFIG_COMPAT
 #define TASK_SIZE_32           UL(0x100000000)
 #define TASK_SIZE              (test_thread_flag(TIF_32BIT) ? \
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 58fcc1edd852afef96e66d92dd3db37d9c043293..c229d9cfe9bf513e0a6aa0a824c4bc9f167c922d 100644
@@ -318,6 +318,19 @@ __create_page_tables:
        adrp    x0, idmap_pg_dir
        adrp    x3, __idmap_text_start          // __pa(__idmap_text_start)
 
+#ifdef CONFIG_ARM64_52BIT_VA
+       mrs_s   x6, SYS_ID_AA64MMFR2_EL1
+       and     x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+       mov     x5, #52
+       cbnz    x6, 1f
+#endif
+       mov     x5, #VA_BITS
+1:
+       adr_l   x6, vabits_user
+       str     x5, [x6]
+       dmb     sy
+       dc      ivac, x6                // Invalidate potentially stale cache line
+
        /*
         * VA_BITS may be too small to allow for an ID mapping to be created
         * that covers system RAM if that is located sufficiently high in the
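
The block added above is the early boot detection referred to in the
commit message: it reads ID_AA64MMFR2_EL1, tests the LVA field, records
either 52 or the compile-time VA_BITS in vabits_user, and then
invalidates the potentially stale cache line (the MMU and caches are
still off at this point, so the store went straight to memory). A rough
C equivalent (early_vabits_user() is a hypothetical helper, not part of
the patch):

    static u64 __init early_vabits_user(void)
    {
            u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

            /* Non-zero LVA field => 52-bit VAs are supported */
            if (mmfr2 & (0xf << ID_AA64MMFR2_LVA_SHIFT))
                    return 52;

            return VA_BITS;
    }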
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d68e4f08b863812e2b73566c5d962..5fe6d2e40e9b4be960ce12f73e69e00c3d7bfeef 100644
@@ -160,7 +160,7 @@ void show_pte(unsigned long addr)
 
        pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-                VA_BITS, mm->pgd);
+                mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e1b2d58a311adc2ff6e155218c87a2a8188499e4..0d3eacc4bfbbf7803d0b0a51783f584bc41c7b64 100644
@@ -52,6 +52,7 @@
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+u64 vabits_user __ro_after_init;
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2db1c491d45d613f7f80cde3352210156e929cf6..0cf86b17714cc9616d96fa288b3aa99c01cd7ff9 100644
@@ -450,7 +450,15 @@ ENTRY(__cpu_setup)
        ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
                        TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
                        TCR_TBI0 | TCR_A1
-       tcr_set_idmap_t0sz      x10, x9
+
+#ifdef CONFIG_ARM64_52BIT_VA
+       ldr_l           x9, vabits_user
+       sub             x9, xzr, x9
+       add             x9, x9, #64
+#else
+       ldr_l           x9, idmap_t0sz
+#endif
+       tcr_set_t0sz    x10, x9
 
        /*
         * Set the IPS bits in TCR_EL1.
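
In the CONFIG_ARM64_52BIT_VA branch above, "sub x9, xzr, x9" followed by
"add x9, x9, #64" computes 64 - vabits_user, i.e. the T0SZ value (12 for
a 52-bit userspace), which the renamed tcr_set_t0sz macro then inserts
into the TCR_EL1 image held in x10. A C rendering of that two-instruction
sequence (user_t0sz() is an illustrative name, not part of the patch):

    static inline unsigned long user_t0sz(unsigned long vabits)
    {
            return (0UL - vabits) + 64;     /* == 64 - vabits, e.g. 12 for 52 */
    }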