KVM: Remove useless intel-iommu.h header inclusion
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 63e661b..6cf0427 100644
@@ -76,7 +76,7 @@ static inline int valid_vcpu(int n)
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-static inline int is_mmio_pfn(pfn_t pfn)
+inline int is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));
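Dropping "static" here presumably means the new device-assignment code needs to call is_mmio_pfn() from outside kvm_main.c, which in turn requires a declaration in a shared header. A minimal sketch of that declaration, assuming it would land in include/linux/kvm_host.h (the header side is not part of this hunk):

/* Assumed declaration; the actual header placement is not shown in this diff. */
int is_mmio_pfn(pfn_t pfn);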
@@ -578,6 +578,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
        }
 
        kvm_free_physmem_slot(&old, &new);
+
+       /* map the pages into the iommu page table */
+       r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+       if (r)
+               goto out;
+
        return 0;
 
 out_free:
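The added kvm_iommu_map_pages() call establishes IOMMU (VT-d) mappings for every page of the just-committed slot, so a device assigned to the guest can DMA into its memory; on failure the error is propagated through the existing out label. The helper itself is implemented outside this file. A minimal sketch of the shape it plausibly takes, where iommu_map_one_page() is a placeholder and not a real kernel API:

/*
 * Sketch only: the real kvm_iommu_map_pages() lives in the VT-d
 * device-assignment code. iommu_map_one_page() is a placeholder
 * for whatever low-level mapping primitive that code uses.
 */
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages)
{
        unsigned long i;
        int r;

        for (i = 0; i < npages; i++) {
                /* resolve each guest frame to a host frame, then map it */
                pfn_t pfn = gfn_to_pfn(kvm, base_gfn + i);

                r = iommu_map_one_page(kvm, base_gfn + i, pfn);
                if (r)
                        return r;
        }
        return 0;
}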
@@ -716,9 +722,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-/*
- * Requires current->mm->mmap_sem to be held
- */
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
        struct page *page[1];
@@ -734,20 +737,23 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
                return page_to_pfn(bad_page);
        }
 
-       npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
-                               NULL);
+       npages = get_user_pages_fast(addr, 1, 1, page);
 
        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;
 
+               down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);
+
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
+                       up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }
 
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               up_read(&current->mm->mmap_sem);
                BUG_ON(!is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);
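get_user_pages_fast() pins the page without requiring the caller to hold mmap_sem, which is why the "Requires current->mm->mmap_sem to be held" comment above could be deleted; only the VM_PFNMAP fallback still walks the VMA tree, so it now takes and drops the semaphore around find_vma() itself. For reference, the removed call depended on the caller's locking and amounted to roughly this:

/*
 * What the old code did, with the caller-side locking (previously
 * documented only in the deleted comment) made explicit:
 */
down_read(&current->mm->mmap_sem);
npages = get_user_pages(current, current->mm, addr,
                        1 /* one page */, 1 /* write */, 0 /* no force */,
                        page, NULL);
up_read(&current->mm->mmap_sem);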
@@ -1082,12 +1088,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
-               goto vcpu_destroy;
+               return r;
 
        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
-               mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
@@ -1103,8 +1108,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
-       mutex_unlock(&kvm->lock);
 vcpu_destroy:
+       mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
 }
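Both error paths now funnel into a single mutex_unlock(): the -EEXIST path jumps to vcpu_destroy while still holding kvm->lock, and unlink re-takes the lock before falling through, so the two separate unlock calls collapse into one. The early "return r" after kvm_arch_vcpu_setup() presumably relies on that function cleaning up the vcpu itself on failure; jumping to vcpu_destroy there would destroy it twice. The resulting locking pattern in miniature (function and names are illustrative, not from the file):

/*
 * Illustrative only: a single unlock serves every exit path, mirroring
 * the restructured error handling above.
 */
static int install_vcpu(struct kvm *kvm, int n, struct kvm_vcpu *vcpu)
{
        int r = 0;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n])
                r = -EEXIST;            /* slot taken: fail under the lock */
        else
                kvm->vcpus[n] = vcpu;   /* publish under the lock */
        mutex_unlock(&kvm->lock);       /* one unlock for both outcomes */
        return r;
}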
@@ -1387,17 +1392,22 @@ out:
 
 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page[1];
+       unsigned long addr;
+       int npages;
+       gfn_t gfn = vmf->pgoff;
        struct kvm *kvm = vma->vm_file->private_data;
-       struct page *page;
 
-       if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
+       addr = gfn_to_hva(kvm, gfn);
+       if (kvm_is_error_hva(addr))
                return VM_FAULT_SIGBUS;
-       page = gfn_to_page(kvm, vmf->pgoff);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+
+       npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
+                               NULL);
+       if (unlikely(npages != 1))
                return VM_FAULT_SIGBUS;
-       }
-       vmf->page = page;
+
+       vmf->page = page[0];
        return 0;
 }
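The fault handler now resolves the faulting gfn to a user virtual address and pins the backing page with a bare get_user_pages() call instead of going through gfn_to_page(). Calling the slow variant without taking mmap_sem is safe here because ->fault handlers run with the faulting task's mmap_sem already held for read. For context, the handler is installed as the vma's fault callback; the wiring below is shown as it conventionally appears in this file, not as part of this diff:

static struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* all faults on the KVM vm fd are served by kvm_vm_fault() */
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}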