KVM: Drop kvm->irq_lock lock from irq injection path
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d7b9bbb..c12c95b 100644
@@ -137,7 +137,6 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
                                    interrupt_work);
        kvm = assigned_dev->kvm;
 
-       mutex_lock(&kvm->irq_lock);
        spin_lock_irq(&assigned_dev->assigned_dev_lock);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                struct kvm_guest_msix_entry *guest_entries =
@@ -156,7 +155,6 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
                            assigned_dev->guest_irq, 1);
 
        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-       mutex_unlock(&assigned_dev->kvm->irq_lock);
 }
 
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
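With kvm->irq_lock gone from the injection path, the interrupt worker serializes only on the per-device spinlock. A minimal sketch of the handler's resulting shape, reconstructed from the context lines above (the elided body is unchanged):

	static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
	{
		struct kvm_assigned_dev_kernel *assigned_dev =
			container_of(work, struct kvm_assigned_dev_kernel,
				     interrupt_work);
		struct kvm *kvm = assigned_dev->kvm;

		/* assigned_dev_lock is now the only lock taken here, so
		 * kvm_set_irq() must be safe to call without kvm->irq_lock. */
		spin_lock_irq(&assigned_dev->assigned_dev_lock);
		...
		spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	}
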
@@ -738,11 +736,10 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
        bool called = true;
        struct kvm_vcpu *vcpu;
 
-       if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-               cpumask_clear(cpus);
+       zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-       me = get_cpu();
        spin_lock(&kvm->requests_lock);
+       me = smp_processor_id();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
@@ -757,7 +754,6 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
        else
                called = false;
        spin_unlock(&kvm->requests_lock);
-       put_cpu();
        free_cpumask_var(cpus);
        return called;
 }
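Two independent cleanups land in make_all_cpus_request(): the open-coded alloc_cpumask_var() + cpumask_clear() pair collapses into zalloc_cpumask_var(), and get_cpu()/put_cpu() are dropped because kvm->requests_lock, a spinlock, already disables preemption, so smp_processor_id() is stable for as long as the lock is held. A sketch of the resulting locking (the elided middle marks requests and IPIs other CPUs, as before):

	static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
	{
		int me;
		cpumask_var_t cpus;

		/* Allocates and zeroes the mask in one call; on failure cpus
		 * is NULL and the function falls back to IPIing all CPUs. */
		zalloc_cpumask_var(&cpus, GFP_ATOMIC);

		spin_lock(&kvm->requests_lock);
		/* Safe without get_cpu(): the spinlock disables preemption. */
		me = smp_processor_id();
		...
		spin_unlock(&kvm->requests_lock);
		free_cpumask_var(cpus);
		...
	}
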
@@ -852,6 +848,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 
 }
 
+static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+                                       struct mm_struct *mm,
+                                       unsigned long address,
+                                       pte_t pte)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+       spin_lock(&kvm->mmu_lock);
+       kvm->mmu_notifier_seq++;
+       kvm_set_spte_hva(kvm, address, pte);
+       spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
@@ -931,6 +940,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
+       .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
 };
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
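The new change_pte callback bumps kvm->mmu_notifier_seq under mmu_lock before kvm_set_spte_hva() rewrites the spte, so a concurrent guest page fault can notice that the translation changed underneath it. A sketch of the assumed consumer on the fault path (not part of this diff): mmu_notifier_retry() compares the sampled sequence count against the current one and forces a retry if any notifier ran in between:

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(kvm, gfn);

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;	/* a notifier fired; redo the fault */
	/* ...install the mapping... */
	spin_unlock(&kvm->mmu_lock);
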
@@ -945,8 +955,8 @@ static struct kvm *kvm_create_vm(void)
        if (IS_ERR(kvm))
                goto out;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-       INIT_LIST_HEAD(&kvm->irq_routing);
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -1092,9 +1102,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 {
        int r;
        gfn_t base_gfn;
-       unsigned long npages, ugfn;
-       int lpages;
-       unsigned long i, j;
+       unsigned long npages;
+       unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
 
@@ -1172,6 +1181,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                goto skip_lpage;
 
        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               unsigned long ugfn;
+               unsigned long j;
+               int lpages;
                int level = i + 2;
 
                /* Avoid unused variable warning if no large pages */
@@ -1666,9 +1678,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-               if ((kvm_arch_interrupt_allowed(vcpu) &&
-                                       kvm_cpu_has_interrupt(vcpu)) ||
-                               kvm_arch_vcpu_runnable(vcpu)) {
+               if (kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
@@ -1677,9 +1687,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                if (signal_pending(current))
                        break;
 
-               vcpu_put(vcpu);
                schedule();
-               vcpu_load(vcpu);
        }
 
        finish_wait(&vcpu->wq, &wait);
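kvm_vcpu_block() now defers entirely to kvm_arch_vcpu_runnable(), the interrupt-allowed/interrupt-pending test having moved into the arch implementations, and the vcpu_put()/vcpu_load() pair around schedule() goes away: the preempt notifiers registered in kvm_init() below (kvm_preempt_ops.sched_out/sched_in) already save and restore vcpu state on every context switch. The wait loop reduces to, roughly:

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		...
		if (signal_pending(current))
			break;

		/* sched_out/sched_in notifiers do the put/load implicitly. */
		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
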
@@ -1715,7 +1723,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return 0;
 }
 
-static struct vm_operations_struct kvm_vcpu_vm_ops = {
+static const struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
 };
 
@@ -2239,6 +2247,7 @@ static long kvm_vm_ioctl(struct file *filp,
                vfree(entries);
                break;
        }
+#endif /* KVM_CAP_IRQ_ROUTING */
 #ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
@@ -2261,7 +2270,6 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        }
 #endif
-#endif /* KVM_CAP_IRQ_ROUTING */
        case KVM_IRQFD: {
                struct kvm_irqfd data;
 
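Moving the #endif up changes the preprocessor nesting so that the MSI-X assignment ioctls depend only on __KVM_HAVE_MSIX rather than additionally on KVM_CAP_IRQ_ROUTING. Schematically, the switch in kvm_vm_ioctl() now nests as (case names in the MSI-X block assumed from the surrounding context):

	#ifdef KVM_CAP_IRQ_ROUTING
		case KVM_SET_GSI_ROUTING: {
			...
		}
	#endif /* KVM_CAP_IRQ_ROUTING */
	#ifdef __KVM_HAVE_MSIX
		case KVM_ASSIGN_SET_MSIX_NR: {
			...
		}
		...
	#endif
		case KVM_IRQFD: {
			...
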
@@ -2319,7 +2327,7 @@ static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return 0;
 }
 
-static struct vm_operations_struct kvm_vm_vm_ops = {
+static const struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
 };
 
@@ -2627,7 +2635,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
 
-static struct file_operations *stat_fops[] = {
+static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
 };
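This hunk, like the two vm_operations_struct hunks above, const-qualifies an operations table that is never written at runtime, which lets the compiler place it in .rodata and reject stray writes. For example (a deliberate compile error, not code from this file):

	/* With stat_fops const-qualified, this no longer compiles: */
	stat_fops[KVM_STAT_VM]->owner = THIS_MODULE;
		/* error: assignment of member 'owner' in read-only object */

The registration paths already take const pointers, so no callers need to change.
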
@@ -2705,8 +2713,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
        int r;
        int cpu;
 
-       kvm_init_debug();
-
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
@@ -2773,6 +2779,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
+       kvm_init_debug();
+
        return 0;
 
 out_free:
@@ -2794,7 +2802,6 @@ out_free_0:
        __free_page(bad_page);
 out:
        kvm_arch_exit();
-       kvm_exit_debug();
 out_fail:
        return r;
 }
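The remaining hunks reorder debugfs setup and teardown: kvm_init_debug() moves from the top of kvm_init() to just before the successful return, so no error path has to unwind it (which is why kvm_exit_debug() disappears from the out labels), and the two hunks below make kvm_exit() remove the debugfs entries first, before the device node and caches are torn down. Schematically:

	int kvm_init(void *opaque, unsigned int vcpu_size, struct module *module)
	{
		...
		kvm_preempt_ops.sched_in = kvm_sched_in;
		kvm_preempt_ops.sched_out = kvm_sched_out;

		/* Registered last: any earlier failure unwinds without
		 * ever touching debugfs. */
		kvm_init_debug();

		return 0;
	out_free:
		...	/* no kvm_exit_debug() needed on any error path */
	}
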
@@ -2803,6 +2810,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
        tracepoint_synchronize_unregister();
+       kvm_exit_debug();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
@@ -2812,7 +2820,6 @@ void kvm_exit(void)
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
-       kvm_exit_debug();
        free_cpumask_var(cpus_hardware_enabled);
        __free_page(bad_page);
 }