author		Marc Zyngier <marc.zyngier@arm.com>	2018-10-15 11:32:06 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-18 09:16:27 +0200
commit		1df9a0a8201b4ba2e58f54c0b88810a8cc4f3fbd (patch)
tree		4a0b35a99427098d354acd986b4dea6748bff201 /arch
parent		75e48eff8aae50cabc9124cb39b31ef94c713c6a (diff)
ARM: KVM: invalidate icache on guest exit for Cortex-A15
Commit 0c47ac8cd157727e7a532d665d6fb1b5fd333977 upstream.

In order to avoid aliasing attacks against the branch predictor
on Cortex-A15, let's invalidate the BTB on guest exit, which can
only be done by invalidating the icache (with ACTLR[0] being set).

We use the same hack as for A12/A17 to perform the vector decoding.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
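The mitigation hinges on one Cortex-A15 detail: the core has no effective
standalone BTB flush on guest exit, but with the IMP DEF bit ACTLR[0] set,
an all-icache invalidate also invalidates the branch predictor. A minimal
C sketch of issuing that operation (illustration only, not part of this
patch; the helper name is invented here — the patch below does this with
a raw mcr in the hyp vector assembly):

	/*
	 * Sketch: ICIALLU (CP15 c7, c5, 0) invalidates the whole
	 * icache to the Point of Unification. On Cortex-A15 with
	 * ACTLR[0] set, this also invalidates the branch predictor,
	 * standing in for the BPIALL used on A12/A17.
	 */
	static inline void btb_inval_via_icache(void)
	{
		asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0) : "memory");
		asm volatile("isb" : : : "memory");
	}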
Diffstat (limited to 'arch')
 arch/arm/include/asm/kvm_mmu.h |  5 +++++
 arch/arm/kvm/hyp/hyp-entry.S   | 24 ++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 85d48c9e1587..082b2861926d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -255,6 +255,11 @@ static inline void *kvm_get_hyp_vector(void)
 		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
 	}
 
+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
 #endif
 	default:
 	{
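For context, a sketch of kvm_get_hyp_vector() with this hunk applied. The
A15 case and the default case are visible in the hunk; the A12/A17 case
labels above them are an assumption, inferred from the existing
__kvm_hyp_vector_bp_inv return in the context lines:

	static inline void *kvm_get_hyp_vector(void)
	{
		switch (read_cpuid_part()) {
	#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
		case ARM_CPU_PART_CORTEX_A12:
		case ARM_CPU_PART_CORTEX_A17:
		{
			/* BPIALL works on these parts, so flush only the BTB */
			extern char __kvm_hyp_vector_bp_inv[];
			return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
		}

		case ARM_CPU_PART_CORTEX_A15:
		{
			/* New: A15 must flush the BTB via the icache instead */
			extern char __kvm_hyp_vector_ic_inv[];
			return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
		}
	#endif
		default:
		{
			/* Unaffected or unknown parts keep the plain vectors */
			extern char __kvm_hyp_vector[];
			return kvm_ksym_ref(__kvm_hyp_vector);
		}
		}
	}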
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index e789f52a5129..918a05dd2d63 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -73,6 +73,28 @@ __kvm_hyp_vector:
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	.align 5
+__kvm_hyp_vector_ic_inv:
+	.global __kvm_hyp_vector_ic_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset		7 */
+	W(add)	sp, sp, #1	/* Undef		6 */
+	W(add)	sp, sp, #1	/* Syscall		5 */
+	W(add)	sp, sp, #1	/* Prefetch abort	4 */
+	W(add)	sp, sp, #1	/* Data abort		3 */
+	W(add)	sp, sp, #1	/* HVC			2 */
+	W(add)	sp, sp, #1	/* IRQ			1 */
+	W(nop)			/* FIQ			0 */
+
+	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
+	isb
+
+	b	decode_vectors
+
+	.align 5
 __kvm_hyp_vector_bp_inv:
 	.global __kvm_hyp_vector_bp_inv
@@ -92,6 +114,8 @@ __kvm_hyp_vector_bp_inv:
 	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
 	isb
 
+decode_vectors:
+
 #ifdef CONFIG_THUMB2_KERNEL
 	/*
 	 * Yet another silly hack: Use VPIDR as a temp register.
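How the SP trick works: each of the eight 4-byte vector slots is a
W(add) sp, sp, #1 (a nop for the last), so an exception taken at slot N
falls through the remaining slots and leaves 7 - N in the low bits of
the 8-byte-aligned SP, which decode_vectors can read back and clear.
A hedged C model of that arithmetic (illustration only; the function
name and 0-based slot numbering are invented for this sketch):

	#include <assert.h>

	/* Value left in SP's low bits after entering at a given slot */
	static unsigned int sp_low_bits_after_entry(unsigned long sp,
						    unsigned int slot)
	{
		assert((sp & 7) == 0);		/* SP must be 8-byte aligned */
		return (sp + (7 - slot)) & 7;	/* one add per slot fallen through */
	}

	int main(void)
	{
		assert(sp_low_bits_after_entry(0x1000, 0) == 7);	/* Reset */
		assert(sp_low_bits_after_entry(0x1000, 6) == 1);	/* IRQ   */
		assert(sp_low_bits_after_entry(0x1000, 7) == 0);	/* FIQ   */
		return 0;
	}

Because the encoding lives entirely in bits SP already guarantees to be
zero, the handler can recover the exception number with a mask and then
restore the guest's original stack pointer unchanged.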