ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
author     Marc Zyngier <marc.zyngier@arm.com>
           Thu, 1 Feb 2018 11:07:35 +0000 (11:07 +0000)
committer  Russell King <rmk+kernel@armlinux.org.uk>
           Thu, 31 May 2018 10:09:03 +0000 (11:09 +0100)
In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.

We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/hyp/hyp-entry.S
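
[Editorial note] The "cannot take a branch before invalidating the BTB"
constraint in the commit message is solved by encoding the exception entry
into SP with straight-line code. A minimal, runnable C sketch of that
encoding (the slot numbering and resulting values mirror the hyp-entry.S
hunk below; the driver code is illustrative):

        #include <stdio.h>

        /*
         * Each of the eight vector slots holds a single
         * "add sp, sp, #1" (the last slot, FIQ, is a nop), and the
         * slots fall through into one another. Entering at slot N
         * executes the remaining 7 - N adds, so SP picks up the
         * entry number before any branch is taken.
         */
        static unsigned encoded_entry(unsigned slot)
        {
                return 7 - slot;
        }

        int main(void)
        {
                /* vector order: reset, undef, svc, pabt, dabt, hvc, irq, fiq */
                printf("HVC (slot 5) encodes as %u\n", encoded_entry(5)); /* 2 */
                printf("FIQ (slot 7) encodes as %u\n", encoded_entry(7)); /* 0 */
                return 0;
        }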

arch/arm/include/asm/kvm_asm.h
index 36dd2962a42db2868badb568e38118884321eddb..df24ed48977d5e78db414c0234f14b40c66d83be 100644
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
arch/arm/include/asm/kvm_mmu.h
index de1b919404e43a3c01cc8511b92862d4fa7c3f42..d08ce9c41df4f2751b7d7ba280c4b3862a6341ec 100644
@@ -297,7 +297,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 
 static inline void *kvm_get_hyp_vector(void)
 {
-       return kvm_ksym_ref(__kvm_hyp_vector);
+       switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A17:
+       {
+               extern char __kvm_hyp_vector_bp_inv[];
+               return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+       }
+
+#endif
+       default:
+       {
+               extern char __kvm_hyp_vector[];
+               return kvm_ksym_ref(__kvm_hyp_vector);
+       }
+       }
 }
 
 static inline int kvm_map_vectors(void)
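
[Editorial note] A hedged sketch of the CPU matching that drives the switch
above. read_cpuid_part() masks the variant and revision fields out of MIDR,
so any stepping of an affected core selects the hardened vectors. The mask
and part encodings below follow my reading of
arch/arm/include/asm/cputype.h and should be treated as assumptions:

        #include <stdio.h>
        #include <stdint.h>

        #define CPU_PART_MASK   0xff00fff0u     /* implementer + part number */
        #define PART_CORTEX_A12 0x4100c0d0u     /* assumed encoding          */
        #define PART_CORTEX_A17 0x4100c0e0u     /* assumed encoding          */

        static uint32_t cpuid_part(uint32_t midr)
        {
                return midr & CPU_PART_MASK;    /* drop variant/revision */
        }

        int main(void)
        {
                uint32_t midr = 0x410fc0d1;     /* hypothetical A12 r0p1 MIDR */

                switch (cpuid_part(midr)) {
                case PART_CORTEX_A12:
                case PART_CORTEX_A17:
                        puts("__kvm_hyp_vector_bp_inv");
                        break;
                default:
                        puts("__kvm_hyp_vector");
                }
                return 0;
        }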
arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070f5c4b172c65dfdb5dd85b8b3df89..e789f52a51290e2b8c6b5b011b3992c25aa28174 100644
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
        W(b)    hyp_irq
        W(b)    hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       .align 5
+__kvm_hyp_vector_bp_inv:
+       .global __kvm_hyp_vector_bp_inv
+
+       /*
+        * We encode the exception entry in the bottom 3 bits of
+        * SP, and we have to guarantee that SP is 8-byte aligned.
+        */
+       W(add)  sp, sp, #1      /* Reset          7 */
+       W(add)  sp, sp, #1      /* Undef          6 */
+       W(add)  sp, sp, #1      /* Syscall        5 */
+       W(add)  sp, sp, #1      /* Prefetch abort 4 */
+       W(add)  sp, sp, #1      /* Data abort     3 */
+       W(add)  sp, sp, #1      /* HVC            2 */
+       W(add)  sp, sp, #1      /* IRQ            1 */
+       W(nop)                  /* FIQ            0 */
+
+       mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
+       isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+       /*
+        * Yet another silly hack: Use VPIDR as a temp register.
+        * Thumb2 is really a pain, as SP cannot be used with most
+        * of the bitwise instructions. The vect_br macro ensures
+        * things get cleaned up.
+        */
+       mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mov     r0, sp
+       and     r0, r0, #7
+       sub     sp, sp, r0
+       push    {r1, r2}
+       mov     r1, r0
+       mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mrc     p15, 0, r2, c0, c0, 0   /* MIDR  */
+       mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(   eor     sp, sp, #\val   )
+ARM(   tst     sp, #7          )
+ARM(   eorne   sp, sp, #\val   )
+
+THUMB( cmp     r1, #\val       )
+THUMB( popeq   {r1, r2}        )
+
+       beq     \targ
+.endm
+
+       vect_br 0, hyp_fiq
+       vect_br 1, hyp_irq
+       vect_br 2, hyp_hvc
+       vect_br 3, hyp_dabt
+       vect_br 4, hyp_pabt
+       vect_br 5, hyp_svc
+       vect_br 6, hyp_undef
+       vect_br 7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
        .align
 \label:        mov     r0, #\cause
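
[Editorial note] The ARM-mode half of vect_br is terse enough to be worth a
worked model. A runnable C sketch of the eor/tst/eorne decode (register and
label names mirror the macro above; the driver loop is illustrative):

        #include <stdio.h>

        static unsigned long sp;        /* models the SP register */

        static int vect_br(unsigned val)
        {
                sp ^= val;              /* eor   sp, sp, #val            */
                if (!(sp & 7))          /* tst   sp, #7                  */
                        return 1;       /* beq \targ: matched, and the   */
                                        /* eor already realigned SP      */
                sp ^= val;              /* eorne: undo, try the next val */
                return 0;
        }

        int main(void)
        {
                static const char *const targ[] = {
                        "hyp_fiq", "hyp_irq", "hyp_hvc", "hyp_dabt",
                        "hyp_pabt", "hyp_svc", "hyp_undef", "hyp_reset",
                };
                unsigned v;

                sp = 0x1000 + 2;        /* as if we entered via HVC */
                for (v = 0; v < 8; v++) {
                        if (vect_br(v)) {
                                printf("branch to %s, sp=%#lx\n", targ[v], sp);
                                break;  /* the real beq leaves the chain */
                        }
                }
                return 0;
        }

The Thumb2 path reaches the same decision by comparing r1 (the saved low
bits of SP) against val instead, since Thumb2 forbids most bitwise
operations on SP.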
@@ -149,7 +209,14 @@ hyp_hvc:
        bx      ip
 
 1:
-       push    {lr}
+       /*
+        * Pushing r2 here is just a way of keeping the stack aligned to
+        * 8 bytes on any path that can trigger a HYP exception. Here,
+        * we may well be about to jump into the guest, and the guest
+        * exit would otherwise be badly decoded by our fancy
+        * "decode-exception-without-a-branch" code...
+        */
+       push    {r2, lr}
 
        mov     lr, r0
        mov     r0, r1
@@ -159,7 +226,7 @@ hyp_hvc:
 THUMB( orr     lr, #1)
        blx     lr                      @ Call the HYP function
 
-       pop     {lr}
+       pop     {r2, lr}
        eret
 
 guest_trap:
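
[Editorial note] The comment above push {r2, lr} is worth spelling out:
each pushed register moves SP down by 4 bytes, so pushing a single register
flips an 8-byte-aligned SP to misaligned and would corrupt the SP-based
decode on the next guest exit. A tiny worked example, assuming SP is 8-byte
aligned at HVC entry:

        #include <stdio.h>

        int main(void)
        {
                unsigned long sp = 0x1000;                             /* aligned */

                printf("push {lr}:     sp & 7 = %lu\n", (sp - 4) & 7); /* 4: broken  */
                printf("push {r2, lr}: sp & 7 = %lu\n", (sp - 8) & 7); /* 0: aligned */
                return 0;
        }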