KVM: MMU: Fix spte leak when freeing an invalid page

An invalid page may still contain shadow page table entries (sptes), as the
current vcpu may not have had the chance to unload its root after the page
was marked invalid.

Signed-off-by: Avi Kivity <avi@qumranet.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 33cc39c..2b60b7d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -932,12 +932,10 @@
 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	++kvm->stat.mmu_shadow_zapped;
-	if (!sp->role.invalid) {
-		kvm_mmu_page_unlink_children(kvm, sp);
-		kvm_mmu_unlink_parents(kvm, sp);
-		if (!sp->role.metaphysical)
-			unaccount_shadowed(kvm, sp->gfn);
-	}
+	kvm_mmu_page_unlink_children(kvm, sp);
+	kvm_mmu_unlink_parents(kvm, sp);
+	if (!sp->role.invalid && !sp->role.metaphysical)
+		unaccount_shadowed(kvm, sp->gfn);
 	if (!sp->root_count) {
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);