KVM: MMU: fix hashing for TDP and non-paging modes
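With TDP enabled, mmu_alloc_roots() fed the gfn of the guest's CR3 (or, for PAE, the PDPT quadrant) into kvm_mmu_get_page() even though the resulting roots are direct gpa->hpa maps that do not depend on guest paging state at all. Because the shadow-page hash is keyed on that gfn, logically identical direct roots could land in different hash buckets and never be shared, so each CR3 value in the guest could grow its own copy of the direct map. Fix this by pinning the hash inputs in TDP and non-paging modes: 0 for the 64-bit root, and the quadrant tag (i << 30) for each PAE root. For context, the hash in this version of mmu.c is a simple mask of the gfn (reproduced here as a sketch; the shift constant is taken on trust from the surrounding code):

	static unsigned kvm_page_table_hashfn(gfn_t gfn)
	{
		return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
	}

The remaining hunks are cleanups that ride along in this view: the unused vcpu argument is dropped from the parent-walk callbacks, a loop-invariant memslot lookup is hoisted, open-coded rcu_dereference(kvm->memslots) is replaced by the kvm_memslots() helper, and hash-bucket walks now restart after kvm_mmu_zap_page() instead of patching the iterator's next pointer.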
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b57be03..9696d65 100644
@@ -173,7 +173,7 @@ struct kvm_shadow_walk_iterator {
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))
 
-typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
@@ -431,9 +431,9 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
        int i;
 
        gfn = unalias_gfn(kvm, gfn);
+       slot = gfn_to_memslot_unaliased(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               slot          = gfn_to_memslot_unaliased(kvm, gfn);
                write_count   = slot_largepage_idx(gfn, slot, i);
                *write_count -= 1;
                WARN_ON(*write_count < 0);
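The memslot lookup is hoisted out of the level loop because it is loop-invariant: gfn has already been unaliased, and the slot containing it is the same at every level; only the large-page index within the slot changes. A sketch of the helper as it looks in this era (the field layout is an assumption and may differ slightly):

	static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot,
				       int level)
	{
		unsigned long idx;

		/* Large-page index at this level, relative to the slot base. */
		idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
		      (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
		return &slot->lpage_info[level - 2][idx].write_count;
	}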
@@ -647,7 +647,6 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 {
        struct kvm_rmap_desc *desc;
-       struct kvm_rmap_desc *prev_desc;
        u64 *prev_spte;
        int i;
 
@@ -659,7 +658,6 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-       prev_desc = NULL;
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
@@ -787,7 +785,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
        int retval = 0;
        struct kvm_memslots *slots;
 
-       slots = rcu_dereference(kvm->memslots);
+       slots = kvm_memslots(kvm);
 
        for (i = 0; i < slots->nmemslots; i++) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];
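kvm_memslots() centralizes the RCU dereference that callers previously open-coded, keeping the locking rule in one place. A minimal sketch of what the helper is assumed to wrap (the real definition lives in include/linux/kvm_host.h):

	static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
	{
		/* Caller must be in an RCU read-side section or hold slots_lock. */
		return rcu_dereference(kvm->memslots);
	}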
@@ -1001,8 +999,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 }
 
 
-static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                           mmu_parent_walk_fn fn)
+static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
 {
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
@@ -1011,8 +1008,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
        if (!sp->multimapped && sp->parent_pte) {
                parent_sp = page_header(__pa(sp->parent_pte));
-               fn(vcpu, parent_sp);
-               mmu_parent_walk(vcpu, parent_sp, fn);
+               fn(parent_sp);
+               mmu_parent_walk(parent_sp, fn);
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
@@ -1020,8 +1017,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                        if (!pte_chain->parent_ptes[i])
                                break;
                        parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
-                       fn(vcpu, parent_sp);
-                       mmu_parent_walk(vcpu, parent_sp, fn);
+                       fn(parent_sp);
+                       mmu_parent_walk(parent_sp, fn);
                }
 }
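The vcpu argument can be dropped because the walk needs no per-vcpu state: the parent shadow page is recovered from the parent spte's physical address alone, via page_header(). That helper is defined earlier in mmu.c, approximately as follows (sketch from memory, not part of this diff):

	static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		/* page->private points back at the owning kvm_mmu_page. */
		return (struct kvm_mmu_page *)page_private(page);
	}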
 
@@ -1058,16 +1055,15 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
                }
 }
 
-static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int unsync_walk_fn(struct kvm_mmu_page *sp)
 {
        kvm_mmu_update_parents_unsync(sp);
        return 1;
 }
 
-static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
-                                       struct kvm_mmu_page *sp)
+static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-       mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+       mmu_parent_walk(sp, unsync_walk_fn);
        kvm_mmu_update_parents_unsync(sp);
 }
 
@@ -1345,7 +1341,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        if (sp->unsync_children) {
                                set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
-                               kvm_mmu_mark_parents_unsync(vcpu, sp);
+                               kvm_mmu_mark_parents_unsync(sp);
                        }
                        trace_kvm_mmu_get_page(sp, false);
                        return sp;
@@ -1567,13 +1563,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        r = 0;
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
+restart:
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.direct) {
                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
                        r = 1;
                        if (kvm_mmu_zap_page(kvm, sp))
-                               n = bucket->first;
+                               goto restart;
                }
        return r;
 }
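The goto restart replaces the old n = bucket->first fixup because kvm_mmu_zap_page() can remove entries other than sp itself (for instance unsynced children that sit on the same hash chain), leaving the next pointer cached by hlist_for_each_entry_safe() dangling. The same hazard can be reproduced in a self-contained userspace toy; every name below is illustrative, not kernel API. zap() deletes the requested key and, as a side effect, key - 1, which may be exactly the node the iterator saved:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int key; struct node *next; };

	static struct node *head;

	static void push(int key)
	{
		struct node *n = malloc(sizeof(*n));
		n->key = key;
		n->next = head;
		head = n;
	}

	/* Deletes 'key' and, like kvm_mmu_zap_page(), possibly another
	 * entry too (here: key - 1).  Returns nonzero when an extra node
	 * was freed, i.e. when any cached next pointer may be stale. */
	static int zap(int key)
	{
		struct node **pp = &head;
		int extra = 0;

		while (*pp) {
			if ((*pp)->key == key || (*pp)->key == key - 1) {
				struct node *victim = *pp;
				extra += victim->key != key;
				*pp = victim->next;
				free(victim);
			} else {
				pp = &(*pp)->next;
			}
		}
		return extra;
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			push(i);		/* list: 7 6 5 4 3 2 1 0 */
	restart:
		for (struct node *n = head; n; ) {
			struct node *next = n->next;	/* _safe-style cache */
			if (n->key % 2 == 0 && zap(n->key))
				goto restart;	/* 'next' may have been freed */
			n = next;
		}
		for (struct node *n = head; n; n = n->next)
			printf("%d\n", n->key);	/* prints: 7 */
		return 0;
	}

Without the restart, the walk at n == 6 would cache next == 5, zap(6) would free both nodes 6 and 5, and the following n = next would dereference freed memory.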
@@ -1587,13 +1584,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
+restart:
        hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
                if (sp->gfn == gfn && !sp->role.direct
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
                        if (kvm_mmu_zap_page(kvm, sp))
-                               nn = bucket->first;
+                               goto restart;
                }
        }
 }
@@ -1620,20 +1618,6 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
        }
 }
 
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
-{
-       struct page *page;
-
-       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
-       if (gpa == UNMAPPED_GVA)
-               return NULL;
-
-       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
-       return page;
-}
-
 /*
  * The function is based on mtrr_type_lookup() in
  * arch/x86/kernel/cpu/mtrr/generic.c
@@ -1759,7 +1743,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
 
-       kvm_mmu_mark_parents_unsync(vcpu, sp);
+       kvm_mmu_mark_parents_unsync(sp);
 
        mmu_convert_notrap(sp);
        return 0;
@@ -2075,10 +2059,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
-               if (tdp_enabled)
-                       direct = 1;
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
+               if (tdp_enabled) {
+                       direct = 1;
+                       root_gfn = 0;
+               }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
@@ -2088,8 +2074,6 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                return 0;
        }
        direct = !is_paging(vcpu);
-       if (tdp_enabled)
-               direct = 1;
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2105,6 +2089,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
+               if (tdp_enabled) {
+                       direct = 1;
+                       root_gfn = i << 30;
+               }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
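With TDP, the root tables map guest-physical to host-physical directly and owe nothing to the guest's CR3 or PDPTEs, so the hash input is pinned rather than inherited from the guest: 0 for the single 64-bit root above, and i << 30 for PAE root i here, matching the gaddr (i << 30) already passed to kvm_mmu_get_page(). Each of the four PAE roots covers one 1GiB quadrant of guest-physical space (root i maps gpa i << 30 up to (i + 1) << 30; for i = 3 the pinned input is 0xc0000000). Every vcpu and every guest paging mode therefore hashes a given direct root to the same bucket and reuses it instead of allocating duplicates. Note that mmu_check_root() still runs against the guest-derived root_gfn before the override, so the validity check is unchanged.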
@@ -2673,6 +2661,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        }
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+
+restart:
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
                if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
                        continue;
@@ -2693,7 +2683,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
                        if (kvm_mmu_zap_page(vcpu->kvm, sp))
-                               n = bucket->first;
+                               goto restart;
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
@@ -2902,10 +2892,11 @@ void kvm_mmu_zap_all(struct kvm *kvm)
        struct kvm_mmu_page *sp, *node;
 
        spin_lock(&kvm->mmu_lock);
+restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                if (kvm_mmu_zap_page(kvm, sp))
-                       node = container_of(kvm->arch.active_mmu_pages.next,
-                                           struct kvm_mmu_page, link);
+                       goto restart;
+
        spin_unlock(&kvm->mmu_lock);
 
        kvm_flush_remote_tlbs(kvm);
@@ -3013,7 +3004,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        unsigned int  nr_pages = 0;
        struct kvm_memslots *slots;
 
-       slots = rcu_dereference(kvm->memslots);
+       slots = kvm_memslots(kvm);
+
        for (i = 0; i < slots->nmemslots; i++)
                nr_pages += slots->memslots[i].npages;
 
@@ -3289,7 +3281,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
        int i, j, k, idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       slots = rcu_dereference(kvm->memslots);
+       slots = kvm_memslots(kvm);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *m = &slots->memslots[i];
                struct kvm_rmap_desc *d;