asedeno.scripts.mit.edu Git - linux.git/commitdiff
arm64: trap userspace "dc cvau" cache operation on errata-affected core
authorAndre Przywara <andre.przywara@arm.com>
Tue, 28 Jun 2016 17:07:32 +0000 (18:07 +0100)
committerCatalin Marinas <catalin.marinas@arm.com>
Fri, 1 Jul 2016 10:46:00 +0000 (11:46 +0100)
The ARM errata 819472, 826319, 827319 and 824069 for affected
Cortex-A53 cores require promoting "dc cvau" instructions to
"dc civac". Since we allow userspace to also emit those instructions,
we should make sure that "dc cvau" gets promoted there too.
So let's grasp the nettle here and actually trap every userland cache
maintenance instruction once we detect at least one affected core in
the system.
We then emulate the instruction by executing it on behalf of userland,
promoting "dc cvau" to "dc civac" on the way and injecting an
access fault back into userspace.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
[catalin.marinas@arm.com: s/set_segfault/arm64_notify_segfault/]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/traps.c

index cef1cf398356f1f61ceea854fc564f6df1d316cc..ace0a96e7d6e756fa54802ab9ac88740b11633d6 100644 (file)
@@ -192,5 +192,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 void cpu_enable_pan(void *__unused);
 void cpu_enable_uao(void *__unused);
+void cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
index 751e901c8d3779c12e8e16669ea910448b927d4c..cc06794b7346bf566b6edbb308e242c6b6d95815 100644 (file)
                         SCTLR_ELx_SA | SCTLR_ELx_I)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_UCI          (1 << 26)
 #define SCTLR_EL1_SPAN         (1 << 23)
 #define SCTLR_EL1_SED          (1 << 8)
 #define SCTLR_EL1_CP15BEN      (1 << 5)
 
-
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_RDM_SHIFT         28
 #define ID_AA64ISAR0_ATOMICS_SHIFT     20
index c2261a7a316efe17010e6eddcbe51eadb56dd10c..af647d2cee223cee81241f6c181d8abec4dc140f 100644 (file)
@@ -46,6 +46,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+               .enable = cpu_enable_cache_maint_trap,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -54,6 +55,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+               .enable = cpu_enable_cache_maint_trap,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
index eefffa81c6df48937dba779c33ac0174c8572e34..3eca5d34f7a682245e1ef4e7210a0b214e87cfac 100644 (file)
@@ -451,7 +451,7 @@ el0_sync:
        cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
-       b.eq    el0_undef
+       b.eq    el0_sys
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
@@ -579,6 +579,16 @@ el0_undef:
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user
+el0_sys:
+       /*
+        * System instructions, for trapped cache maintenance instructions
+        */
+       enable_dbg_and_irq
+       ct_user_exit
+       mov     x0, x25
+       mov     x1, sp
+       bl      do_sysinstr
+       b       ret_to_user
 el0_dbg:
        /*
         * Debug exception handling
index d8a5366dcc240ee52364011eabb208b9d5ada0ec..e04f83873af76244c5785782b3b60283250bab04 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
 #include <asm/system_misc.h>
+#include <asm/sysreg.h>
 
 static const char *handler[]= {
        "Synchronous Abort",
@@ -427,6 +428,65 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
        force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
+void cpu_enable_cache_maint_trap(void *__unused)
+{
+       config_sctlr_el1(SCTLR_EL1_UCI, 0);
+}
+
+#define __user_cache_maint(insn, address, res)                 \
+       asm volatile (                                          \
+               "1:     " insn ", %1\n"                         \
+               "       mov     %w0, #0\n"                      \
+               "2:\n"                                          \
+               "       .pushsection .fixup,\"ax\"\n"           \
+               "       .align  2\n"                            \
+               "3:     mov     %w0, %w2\n"                     \
+               "       b       2b\n"                           \
+               "       .popsection\n"                          \
+               _ASM_EXTABLE(1b, 3b)                            \
+               : "=r" (res)                                    \
+               : "r" (address), "i" (-EFAULT) )
+
+asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+{
+       unsigned long address;
+       int ret;
+
+       /* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */
+       if ((esr & 0x01fffc01) == 0x0012dc00) {
+               int rt = (esr >> 5) & 0x1f;
+               int crm = (esr >> 1) & 0x0f;
+
+               address = (rt == 31) ? 0 : regs->regs[rt];
+
+               switch (crm) {
+               case 11:                /* DC CVAU, gets promoted */
+                       __user_cache_maint("dc civac", address, ret);
+                       break;
+               case 10:                /* DC CVAC, gets promoted */
+                       __user_cache_maint("dc civac", address, ret);
+                       break;
+               case 14:                /* DC CIVAC */
+                       __user_cache_maint("dc civac", address, ret);
+                       break;
+               case 5:                 /* IC IVAU */
+                       __user_cache_maint("ic ivau", address, ret);
+                       break;
+               default:
+                       force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+                       return;
+               }
+       } else {
+               force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+               return;
+       }
+
+       if (ret)
+               arm64_notify_segfault(regs, address);
+       else
+               regs->pc += 4;
+}
+
 long compat_arm_syscall(struct pt_regs *regs);
 
 asmlinkage long do_ni_syscall(struct pt_regs *regs)