From foo@baz Wed Nov 21 19:20:53 CET 2018
From: David Long <dave.long@linaro.org>
Date: Wed, 7 Nov 2018 11:43:49 -0500
Subject: ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
To: stable@vger.kernel.org, Russell King - ARM Linux <linux@armlinux.org.uk>, Florian Fainelli <f.fainelli@gmail.com>, Tony Lindgren <tony@atomide.com>, Marc Zyngier <marc.zyngier@arm.com>, Mark Rutland <mark.rutland@arm.com>
Cc: Greg KH <gregkh@linuxfoundation.org>, Mark Brown <broonie@kernel.org>
Message-ID: <20181107164402.9380-12-dave.long@linaro.org>

From: Marc Zyngier <marc.zyngier@arm.com>

Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.

In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.
We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
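Note for reviewers: the new vectors must not take any branch before the
BPIALL, so each vector entry simply falls through the remaining
W(add) sp, sp, #1 instructions; an entry at vector slot k therefore
executes 7 - k additions, leaving the exception number in the low 3 bits
of an (8-byte-aligned) SP. The following is a minimal stand-alone C model
of that encoding, illustrative only and not part of the patch:

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-alone model (not kernel code): vector slot 0 is Reset and
 * slot 7 is FIQ.  Entering at slot k and falling through the rest
 * of the W(add) sp, sp, #1 chain executes 7 - k additions, so the
 * exception number lands in SP[2:0] without a single branch being
 * taken before BPIALL.
 */
static uint32_t encode_exception(uint32_t sp, unsigned int slot)
{
	return sp + (7 - slot);		/* the fall-through adds */
}

int main(void)
{
	const uint32_t sp = 0xffff1000;	/* any 8-byte-aligned HYP stack */
	unsigned int slot;

	for (slot = 0; slot < 8; slot++)
		printf("slot %u -> code %u\n", slot,
		       encode_exception(sp, slot) & 7);
	return 0;
}

This is why the vectors require 8-byte stack alignment on entry: the
low 3 bits of SP must be known-zero before the adds run.
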
 arch/arm/include/asm/kvm_asm.h |    2 -
 arch/arm/include/asm/kvm_mmu.h |   17 +++++++++
 arch/arm/kvm/hyp/hyp-entry.S   |   71 +++++++++++++++++++++++++++++++++++++++--
 3 files changed, 85 insertions(+), 5 deletions(-)

--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -248,7 +248,22 @@ static inline int kvm_read_guest_lock(st
 
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee that SP is 8-byte aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things get cleaned up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -131,7 +191,14 @@ hyp_hvc:
 	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
 	beq	1f
 
-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -141,7 +208,7 @@ hyp_hvc:
 THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
-	pop	{lr}
+	pop	{r2, lr}
 1:	eret
 
 guest_trap:
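
For completeness, the ARM-mode vect_br sequence above can be checked
with a small user-space model: XORing SP with a candidate value clears
the low 3 bits exactly when the candidate matches the encoded exception
number, realigning SP as a side effect, and the eorne undoes the XOR on
a mismatch so the next candidate can be tried. This is a sketch only
(the names are made up; nothing here is part of the patch):

#include <assert.h>
#include <stdint.h>

/* Stand-alone model of the ARM-mode vect_br macro. */
static int vect_br(uint32_t *sp, uint32_t val)
{
	*sp ^= val;		/* eor   sp, sp, #\val */
	if (!(*sp & 7))		/* tst   sp, #7        */
		return 1;	/* beq   \targ         */
	*sp ^= val;		/* eorne sp, sp, #\val */
	return 0;
}

int main(void)
{
	uint32_t code, val;

	for (code = 0; code < 8; code++) {
		uint32_t sp = 0xffff1000 | code; /* SP with encoded entry */

		for (val = 0; val < 8; val++) {
			if (vect_br(&sp, val)) {
				assert(val == code);	/* right handler  */
				assert(!(sp & 7));	/* SP realigned   */
				break;			/* beq dispatches */
			}
		}
	}
	return 0;
}

The Thumb2 variant reaches the same decision differently: since SP
cannot be used with eor/tst there, the low bits are extracted into r1
up front (using VPIDR as a scratch register, restored from MIDR) and
each vect_br does a plain cmp against r1 instead.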