KVM: MMU: Simplify kvm_mmu_zap_page()
author Avi Kivity <avi@qumranet.com>
Fri, 11 Jul 2008 15:07:26 +0000 (18:07 +0300)
committer Avi Kivity <avi@qumranet.com>
Wed, 15 Oct 2008 08:15:12 +0000 (10:15 +0200)
The twisty maze of conditionals in kvm_mmu_zap_page() can be reduced: hoist the
remote TLB flush out of kvm_mmu_page_unlink_children() and do the
unaccount_shadowed() bookkeeping once, before branching on sp->root_count,
instead of duplicating it in both branches.  Also make kvm_mmu_pte_write()
skip pages that are already marked invalid.

[joerg: fix tlb flushing]

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
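
For reference, this is roughly how kvm_mmu_zap_page() reads once the hunks
below are applied; it is reconstructed from the diff context rather than
copied from the tree, so the surrounding lines may differ slightly:

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	++kvm->stat.mmu_shadow_zapped;
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	/* one remote TLB flush covers both the freed and the still-rooted case */
	kvm_flush_remote_tlbs(kvm);
	/* unaccount once here, instead of separately in each branch below */
	if (!sp->role.invalid && !sp->role.metaphysical)
		unaccount_shadowed(kvm, sp->gfn);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}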
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81016a3..c3afbfe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -955,7 +955,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                rmap_remove(kvm, &pt[i]);
                        pt[i] = shadow_trap_nonpresent_pte;
                }
-               kvm_flush_remote_tlbs(kvm);
                return;
        }
 
@@ -974,7 +973,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                }
                pt[i] = shadow_trap_nonpresent_pte;
        }
-       kvm_flush_remote_tlbs(kvm);
 }
 
 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -1016,18 +1014,16 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        ++kvm->stat.mmu_shadow_zapped;
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
+       kvm_flush_remote_tlbs(kvm);
+       if (!sp->role.invalid && !sp->role.metaphysical)
+               unaccount_shadowed(kvm, sp->gfn);
        if (!sp->root_count) {
-               if (!sp->role.metaphysical && !sp->role.invalid)
-                       unaccount_shadowed(kvm, sp->gfn);
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
-               int invalid = sp->role.invalid;
-               list_move(&sp->link, &kvm->arch.active_mmu_pages);
                sp->role.invalid = 1;
+               list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
-               if (!sp->role.metaphysical && !invalid)
-                       unaccount_shadowed(kvm, sp->gfn);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1842,7 +1838,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-               if (sp->gfn != gfn || sp->role.metaphysical)
+               if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
                        continue;
                pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);