From: Ben Hutchings <ben@decadent.org.uk>
Date: Thu, 15 Oct 2015 01:20:29 +0100
Subject: Revert "KVM: MMU: fix validation of mmio page fault"

This reverts commit 41e3025eacd6daafc40c3e7850fbcabc8b847805, which
was commit 6f691251c0350ac52a007c54bf3ef62e9d8cdc5e upstream.
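
For anyone auditing the revert, the two commits can be inspected side
by side; for example, in a clone that has both mainline and the
stable branch available:

	git show 41e3025eacd6		# stable backport being reverted
	git show 6f691251c035		# its upstream original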

The fix is only needed after commit f8f559422b6c ("KVM: MMU: fast
invalidate all mmio sptes"), which went into Linux 3.11 and is not
present in this branch.  Applying it here therefore removed a check
that this branch still relies on; restore it.
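
What the restored code guards: on direct (TDP) hosts, a last-level
spte examined during an mmio page fault can legitimately be only the
mmio spte itself, or a spte that is zapped or in the middle of being
zapped; anything else means the gfn is not an mmio page and is a bug.
The sketch below is not part of the patch: it is a minimal standalone
illustration of the 32-bit (PAE) case, where an spte is written as
two 32-bit halves, so a lockless reader can observe a half-zapped
value.  The shadow_mmio_mask value and the main() harness are
invented for the example; KVM sets the real mask at runtime via
kvm_mmu_set_mmio_spte_mask().

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative mask only, not KVM's real value. */
	static const uint64_t shadow_mmio_mask = 0xffull << 49;

	static bool check_direct_spte_mmio_pf(uint64_t spte)
	{
		uint32_t low = (uint32_t)spte;
		uint32_t high = (uint32_t)(spte >> 32);
		uint32_t high_mmio_mask = (uint32_t)(shadow_mmio_mask >> 32);

		if (spte == 0)		/* already zapped */
			return true;
		/* Being zapped: the low half is cleared first, so the
		   high half still carries the mmio mask bits. */
		if (low == 0 && (high & high_mmio_mask) == high_mmio_mask)
			return true;
		return false;		/* BUG: the gfn is not an mmio page */
	}

	int main(void)
	{
		uint64_t mmio_spte = shadow_mmio_mask | 0x42;

		printf("%d\n", check_direct_spte_mmio_pf(0));		/* 1 */
		printf("%d\n", check_direct_spte_mmio_pf(mmio_spte &
							 ~0xffffffffull)); /* 1 */
		printf("%d\n", check_direct_spte_mmio_pf(0x1000));	/* 0 */
		return 0;
	}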

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/x86/kvm/mmu.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cac7b2b..4a949c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -326,6 +326,12 @@ static u64 __get_spte_lockless(u64 *sptep)
 {
 	return ACCESS_ONCE(*sptep);
 }
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+	/* It is valid if the spte is zapped. */
+	return spte == 0ull;
+}
 #else
 union split_spte {
 	struct {
@@ -430,6 +436,23 @@ retry:
 
 	return spte.spte;
 }
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+	union split_spte sspte = (union split_spte)spte;
+	u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+	/* It is valid if the spte is zapped. */
+	if (spte == 0ull)
+		return true;
+
+	/* It is valid if the spte is being zapped. */
+	if (sspte.spte_low == 0ull &&
+	    (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+		return true;
+
+	return false;
+}
 #endif
 
 static bool spte_has_volatile_bits(u64 spte)
@@ -2872,6 +2895,21 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	return vcpu_match_mmio_gva(vcpu, addr);
 }
 
+
+/*
+ * On direct hosts, the last spte only allows two states
+ * for an mmio page fault:
+ *   - It is the mmio spte
+ *   - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+	return __check_direct_spte_mmio_pf(spte);
+}
+
 static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -2913,6 +2951,13 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	}
 
 	/*
+	 * It's ok if the gva is remapped by other cpus on a shadow guest,
+	 * but it's a BUG if the gfn is not an mmio page.
+	 */
+	if (direct && !check_direct_spte_mmio_pf(spte))
+		return -1;
+
+	/*
 	 * If the page table is zapped by other cpus, let CPU fault again on
 	 * the address.
 	 */