KVM: Fix guest shared interrupt with in-kernel irqchip
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0309571..a87f45e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
 #include "coalesced_mmio.h"
 #endif
 
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "irq.h"
+#endif
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -71,12 +77,250 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 
 bool kvm_rebooting;
 
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
+                                                     int assigned_dev_id)
+{
+       struct list_head *ptr;
+       struct kvm_assigned_dev_kernel *match;
+
+       list_for_each(ptr, head) {
+               match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+               if (match->assigned_dev_id == assigned_dev_id)
+                       return match;
+       }
+       return NULL;
+}
+
+static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+{
+       struct kvm_assigned_dev_kernel *assigned_dev;
+
+       assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
+                                   interrupt_work);
+
+       /* kvm->lock is taken to safely inject the irq into the guest.
+        * When the interrupt injection (or the ioapic code) uses a
+        * finer-grained lock, update this.
+        */
+       mutex_lock(&assigned_dev->kvm->lock);
+       kvm_set_irq(assigned_dev->kvm,
+                   assigned_dev->irq_source_id,
+                   assigned_dev->guest_irq, 1);
+       mutex_unlock(&assigned_dev->kvm->lock);
+       kvm_put_kvm(assigned_dev->kvm);
+}
+
+static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
+{
+       struct kvm_assigned_dev_kernel *assigned_dev =
+               (struct kvm_assigned_dev_kernel *) dev_id;
+
+       kvm_get_kvm(assigned_dev->kvm);
+       schedule_work(&assigned_dev->interrupt_work);
+       disable_irq_nosync(irq);
+       return IRQ_HANDLED;
+}
+
+/* Ack the irq line for an assigned device */
+static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+       struct kvm_assigned_dev_kernel *dev;
+
+       if (kian->gsi == -1)
+               return;
+
+       dev = container_of(kian, struct kvm_assigned_dev_kernel,
+                          ack_notifier);
+       kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+       enable_irq(dev->host_irq);
+}
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+                                    struct kvm_assigned_dev_kernel
+                                    *assigned_dev)
+{
+       if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
+               free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+       kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+       kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+
+       if (cancel_work_sync(&assigned_dev->interrupt_work))
+               /* The work was still queued, so the handler never ran and
+                * never dropped the reference taken in
+                * kvm_assigned_dev_intr(); drop it here.
+                */
+               kvm_put_kvm(kvm);
+
+       pci_release_regions(assigned_dev->dev);
+       pci_disable_device(assigned_dev->dev);
+       pci_dev_put(assigned_dev->dev);
+
+       list_del(&assigned_dev->list);
+       kfree(assigned_dev);
+}
+
+void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+       struct list_head *ptr, *ptr2;
+       struct kvm_assigned_dev_kernel *assigned_dev;
+
+       list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+               assigned_dev = list_entry(ptr,
+                                         struct kvm_assigned_dev_kernel,
+                                         list);
+
+               kvm_free_assigned_device(kvm, assigned_dev);
+       }
+}
+
+static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
+                                  struct kvm_assigned_irq
+                                  *assigned_irq)
+{
+       int r = 0;
+       struct kvm_assigned_dev_kernel *match;
+
+       mutex_lock(&kvm->lock);
+
+       match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+                                     assigned_irq->assigned_dev_id);
+       if (!match) {
+               mutex_unlock(&kvm->lock);
+               return -EINVAL;
+       }
+
+       if (match->irq_requested) {
+               match->guest_irq = assigned_irq->guest_irq;
+               match->ack_notifier.gsi = assigned_irq->guest_irq;
+               mutex_unlock(&kvm->lock);
+               return 0;
+       }
+
+       INIT_WORK(&match->interrupt_work,
+                 kvm_assigned_dev_interrupt_work_handler);
+
+       if (irqchip_in_kernel(kvm)) {
+               if (!capable(CAP_SYS_RAWIO)) {
+                       r = -EPERM;
+                       goto out_release;
+               }
+
+               if (assigned_irq->host_irq)
+                       match->host_irq = assigned_irq->host_irq;
+               else
+                       match->host_irq = match->dev->irq;
+               match->guest_irq = assigned_irq->guest_irq;
+               match->ack_notifier.gsi = assigned_irq->guest_irq;
+               match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
+               kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
+               r = kvm_request_irq_source_id(kvm);
+               if (r < 0)
+                       goto out_release;
+               else
+                       match->irq_source_id = r;
+
+               /* Even though this is PCI, we don't want to use shared
+                * interrupts. Sharing host devices with guest-assigned devices
+                * on the same interrupt line is not a happy situation: there
+                * are going to be long delays in accepting, acking, etc.
+                */
+               if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
+                               "kvm_assigned_device", (void *)match)) {
+                       r = -EIO;
+                       goto out_release;
+               }
+       }
+
+       match->irq_requested = true;
+       mutex_unlock(&kvm->lock);
+       return r;
+out_release:
+       mutex_unlock(&kvm->lock);
+       kvm_free_assigned_device(kvm, match);
+       return r;
+}
+
+static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+                                     struct kvm_assigned_pci_dev *assigned_dev)
+{
+       int r = 0;
+       struct kvm_assigned_dev_kernel *match;
+       struct pci_dev *dev;
+
+       mutex_lock(&kvm->lock);
+
+       match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+                                     assigned_dev->assigned_dev_id);
+       if (match) {
+               /* device already assigned */
+               r = -EINVAL;
+               goto out;
+       }
+
+       match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
+       if (match == NULL) {
+               printk(KERN_INFO "%s: Couldn't allocate memory\n",
+                      __func__);
+               r = -ENOMEM;
+               goto out;
+       }
+       dev = pci_get_bus_and_slot(assigned_dev->busnr,
+                                  assigned_dev->devfn);
+       if (!dev) {
+               printk(KERN_INFO "%s: host device not found\n", __func__);
+               r = -EINVAL;
+               goto out_free;
+       }
+       if (pci_enable_device(dev)) {
+               printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+               r = -EBUSY;
+               goto out_put;
+       }
+       r = pci_request_regions(dev, "kvm_assigned_device");
+       if (r) {
+               printk(KERN_INFO "%s: Could not get access to device regions\n",
+                      __func__);
+               goto out_disable;
+       }
+       match->assigned_dev_id = assigned_dev->assigned_dev_id;
+       match->host_busnr = assigned_dev->busnr;
+       match->host_devfn = assigned_dev->devfn;
+       match->dev = dev;
+
+       match->kvm = kvm;
+
+       list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+       if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+               r = kvm_iommu_map_guest(kvm, match);
+               if (r)
+                       goto out_list_del;
+       }
+
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+out_list_del:
+       list_del(&match->list);
+       pci_release_regions(dev);
+out_disable:
+       pci_disable_device(dev);
+out_put:
+       pci_dev_put(dev);
+out_free:
+       kfree(match);
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+#endif
+
 static inline int valid_vcpu(int n)
 {
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-static inline int is_mmio_pfn(pfn_t pfn)
+inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));
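
The hunk above wires assigned-device interrupts through the new irq_source_id argument of kvm_set_irq(), which is what the commit title is about: each injector (the userspace irqchip path, each assigned device) owns a source id, and the arch-side kvm_set_irq() keeps one state bit per source, so the line level the in-kernel irqchip sees is the logical OR of all sources. A deasserting device can therefore no longer clear an interrupt another device is still holding. A standalone sketch of that bookkeeping, with made-up names (the real state lives in the arch irqchip code, not in this file):

    /* shared_line.c -- illustrative only; all names here are hypothetical. */
    #include <stdio.h>

    struct irq_line {
        unsigned int source_bits;   /* one bit per irq_source_id */
    };

    /* Returns the level the irqchip should see: the OR of every source's
     * level, so one source cannot mask another source's assertion. */
    static int set_line_level(struct irq_line *line, int source_id, int level)
    {
        if (level)
            line->source_bits |= 1u << source_id;
        else
            line->source_bits &= ~(1u << source_id);
        return line->source_bits != 0;
    }

    int main(void)
    {
        struct irq_line gsi10 = { 0 };
        set_line_level(&gsi10, 0, 1);                  /* source 0 asserts */
        set_line_level(&gsi10, 1, 1);                  /* source 1 asserts */
        printf("%d\n", set_line_level(&gsi10, 0, 0));  /* prints 1: source 1
                                                          still holds the line */
        return 0;
    }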
@@ -578,6 +822,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
        }
 
        kvm_free_physmem_slot(&old, &new);
+#ifdef CONFIG_DMAR
+       /* map the pages in iommu page table */
+       r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+       if (r)
+               goto out;
+#endif
        return 0;
 
 out_free:
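
kvm_iommu_map_pages() is provided by the companion VT-d assignment code; roughly, it pins each page of the new memory slot and enters the gpa-to-hpa translation into the device's IOMMU domain, so the assigned device can DMA into guest memory. A loose sketch of its shape, assuming a mapping helper like the hypothetical iommu_map_gpa_to_hpa() (the real API differs):

    /* Hypothetical fragment; kernel context assumed. */
    static int sketch_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
                                      unsigned long npages)
    {
            unsigned long i;

            for (i = 0; i < npages; i++) {
                    gfn_t gfn = base_gfn + i;
                    pfn_t pfn = gfn_to_pfn(kvm, gfn);       /* pins the page */
                    int r = iommu_map_gpa_to_hpa(kvm, gfn << PAGE_SHIFT,
                                                 pfn << PAGE_SHIFT);
                    if (r)
                            return r;
            }
            return 0;
    }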
@@ -716,9 +966,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-/*
- * Requires current->mm->mmap_sem to be held
- */
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
        struct page *page[1];
@@ -734,21 +981,24 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
                return page_to_pfn(bad_page);
        }
 
-       npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
-                               NULL);
+       npages = get_user_pages_fast(addr, 1, 1, page);
 
        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;
 
+               down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);
+
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
+                       up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }
 
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               BUG_ON(!is_mmio_pfn(pfn));
+               up_read(&current->mm->mmap_sem);
+               BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);
 
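
Switching to get_user_pages_fast() removes the locking burden from gfn_to_pfn()'s callers (hence the deleted "Requires current->mm->mmap_sem to be held" comment two hunks up): the fast path takes no mmap_sem at all, and the VM_PFNMAP slow path now takes and drops it internally. From a caller's point of view, as an illustrative fragment:

    /* before: every call site had to bracket the lookup itself */
    down_read(&current->mm->mmap_sem);
    pfn = gfn_to_pfn(kvm, gfn);
    up_read(&current->mm->mmap_sem);

    /* after: the helper handles its own locking */
    pfn = gfn_to_pfn(kvm, gfn);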
@@ -762,10 +1012,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
        pfn_t pfn;
 
        pfn = gfn_to_pfn(kvm, gfn);
-       if (!is_mmio_pfn(pfn))
+       if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);
 
-       WARN_ON(is_mmio_pfn(pfn));
+       WARN_ON(kvm_is_mmio_pfn(pfn));
 
        get_page(bad_page);
        return bad_page;
@@ -781,7 +1031,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (!is_mmio_pfn(pfn))
+       if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -807,7 +1057,7 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       if (!is_mmio_pfn(pfn)) {
+       if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
@@ -817,14 +1067,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       if (!is_mmio_pfn(pfn))
+       if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       if (!is_mmio_pfn(pfn))
+       if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
@@ -980,12 +1230,12 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-               if (kvm_cpu_has_interrupt(vcpu))
-                       break;
-               if (kvm_cpu_has_pending_timer(vcpu))
-                       break;
-               if (kvm_arch_vcpu_runnable(vcpu))
+               if (kvm_cpu_has_interrupt(vcpu) ||
+                   kvm_cpu_has_pending_timer(vcpu) ||
+                   kvm_arch_vcpu_runnable(vcpu)) {
+                       set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
+               }
                if (signal_pending(current))
                        break;
 
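
KVM_REQ_UNHALT lets the caller of kvm_vcpu_block() tell a genuine wakeup (an interrupt, pending timer, or other runnable condition) apart from a wakeup caused by a pending signal. A sketch of the consuming side, assuming the x86-style pattern in the arch vcpu-run loop (the exact site is arch code, not this file):

    kvm_vcpu_block(vcpu);
    if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
            /* a wakeup condition fired: leave the halted state */
            vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
    }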
@@ -1082,12 +1332,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
-               goto vcpu_destroy;
+               return r;
 
        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
-               mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
@@ -1103,8 +1352,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
-       mutex_unlock(&kvm->lock);
 vcpu_destroy:
+       mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
 }
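
The two hunks above fix an unlock imbalance by leaving exactly one mutex_unlock() site: every failure path that holds kvm->lock now funnels through the vcpu_destroy label, which drops the lock before destroying the vcpu, while a setup failure before the lock is taken simply returns. The resulting tail of kvm_vm_ioctl_create_vcpu(), reconstructed from the hunks:

            mutex_lock(&kvm->lock);
            if (kvm->vcpus[n]) {
                    r = -EEXIST;
                    goto vcpu_destroy;      /* unlocks, then destroys */
            }
            kvm->vcpus[n] = vcpu;
            ...
    unlink:
            mutex_lock(&kvm->lock);
            kvm->vcpus[n] = NULL;
    vcpu_destroy:
            mutex_unlock(&kvm->lock);
            kvm_arch_vcpu_destroy(vcpu);
            return r;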
@@ -1378,6 +1627,30 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        }
 #endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+       case KVM_ASSIGN_PCI_DEVICE: {
+               struct kvm_assigned_pci_dev assigned_dev;
+
+               r = -EFAULT;
+               if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+                       goto out;
+               r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
+               if (r)
+                       goto out;
+               break;
+       }
+       case KVM_ASSIGN_IRQ: {
+               struct kvm_assigned_irq assigned_irq;
+
+               r = -EFAULT;
+               if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+                       goto out;
+               r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
+               if (r)
+                       goto out;
+               break;
+       }
+#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
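
A hedged userspace sketch of driving the two new ioctls; the field names come from this diff, but the full struct layouts and header details are assumptions, not verified against the uapi header:

    /* assign_sketch.c -- illustrative only. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>   /* assumed to define the structs and ioctls */

    static int assign_device(int vm_fd, int bus, int devfn,
                             int host_irq, int guest_irq)
    {
            struct kvm_assigned_pci_dev dev;
            struct kvm_assigned_irq irq;

            memset(&dev, 0, sizeof(dev));
            dev.assigned_dev_id = (bus << 8) | devfn;  /* userspace-chosen key */
            dev.busnr = bus;
            dev.devfn = devfn;
            dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;   /* optional VT-d mapping */
            if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
                    return -1;

            memset(&irq, 0, sizeof(irq));
            irq.assigned_dev_id = dev.assigned_dev_id;
            irq.host_irq = host_irq;   /* 0 means "use the device's own irq" */
            irq.guest_irq = guest_irq; /* the gsi the guest will see */
            return ioctl(vm_fd, KVM_ASSIGN_IRQ, &irq);
    }

Calling KVM_ASSIGN_IRQ a second time for the same device only updates guest_irq (see the irq_requested check above); the host irq cannot be changed once requested.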
@@ -1387,17 +1660,22 @@ out:
 
 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page[1];
+       unsigned long addr;
+       int npages;
+       gfn_t gfn = vmf->pgoff;
        struct kvm *kvm = vma->vm_file->private_data;
-       struct page *page;
 
-       if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
+       addr = gfn_to_hva(kvm, gfn);
+       if (kvm_is_error_hva(addr))
                return VM_FAULT_SIGBUS;
-       page = gfn_to_page(kvm, vmf->pgoff);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+
+       npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
+                               NULL);
+       if (unlikely(npages != 1))
                return VM_FAULT_SIGBUS;
-       }
-       vmf->page = page;
+
+       vmf->page = page[0];
        return 0;
 }
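
One locking subtlety in the rewritten fault handler: get_user_pages() normally requires mmap_sem, but a vm_ops->fault callback is invoked from the page fault path with the faulting task's mmap_sem already held, so the bare call is safe here. As a comment-style sketch of the call chain:

    /*
     * do_page_fault()                  takes current->mm->mmap_sem
     *   handle_mm_fault()
     *     __do_fault()
     *       vma->vm_ops->fault()       == kvm_vm_fault()
     *         get_user_pages(...)      safe: mmap_sem already held
     */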