KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
author      Avi Kivity <avi@redhat.com>
            Tue, 4 May 2010 09:58:32 +0000 (12:58 +0300)
committer   Avi Kivity <avi@redhat.com>
            Wed, 19 May 2010 08:36:35 +0000 (11:36 +0300)
On svm, kvm_read_pdptrs() may require reading guest memory, which can sleep.

Push the spinlock into mmu_alloc_roots(), and only take it after we've read
the pdptr.

Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
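
For orientation, here is a simplified sketch of the ordering this patch establishes in the PAE branch of mmu_alloc_roots(). It is an illustration, not the exact kernel code: the not-present-pdptr and direct-map cases are omitted, and kvm_pdptr_read() is assumed to be the helper behind the pdptr read, since that call is not visible in the hunks below.

        /*
         * Sketch: read the pdptr with mmu_lock NOT held (on svm the read may
         * touch guest memory and sleep); take the spinlock only around
         * kvm_mmu_get_page() and the root bookkeeping.
         */
        for (i = 0; i < 4; ++i) {
                u64 pdptr = kvm_pdptr_read(vcpu, i);    /* may sleep on svm */
                gfn_t root_gfn = pdptr >> PAGE_SHIFT;   /* present-pdptr case only */

                spin_lock(&vcpu->kvm->mmu_lock);        /* taken only after the read */
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);

                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);

The last hunk is the caller-side half of the same change: kvm_mmu_load() now drops mmu_lock before calling mmu_alloc_roots() and retakes it for mmu_sync_roots(), so the lock is never held across a pdptr read.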
arch/x86/kvm/mmu.c

index 51eb6d6..de99638 100644
@@ -2065,11 +2065,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        direct = 1;
                        root_gfn = 0;
                }
+               spin_lock(&vcpu->kvm->mmu_lock);
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
+               spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu.root_hpa = root;
                return 0;
        }
@@ -2093,11 +2095,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        direct = 1;
                        root_gfn = i << 30;
                }
+               spin_lock(&vcpu->kvm->mmu_lock);
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
+               spin_unlock(&vcpu->kvm->mmu_lock);
+
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2466,7 +2471,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                goto out;
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
+       spin_unlock(&vcpu->kvm->mmu_lock);
        r = mmu_alloc_roots(vcpu);
+       spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (r)