diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4727c08..0ed662d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -60,10 +60,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+static int msi2intx = 1;
+module_param(msi2intx, bool, 0);
+
 DEFINE_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
-static cpumask_t cpus_hardware_enabled;
+static cpumask_var_t cpus_hardware_enabled;
 
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -75,7 +78,7 @@ struct dentry *kvm_debugfs_dir;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
 
-bool kvm_rebooting;
+static bool kvm_rebooting;
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -104,11 +107,14 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 	 * finer-grained lock, update this */
 	mutex_lock(&assigned_dev->kvm->lock);
-	kvm_set_irq(assigned_dev->kvm,
-		    assigned_dev->irq_source_id,
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 		    assigned_dev->guest_irq, 1);
+
+	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
+		enable_irq(assigned_dev->host_irq);
+		assigned_dev->host_irq_disabled = false;
+	}
 	mutex_unlock(&assigned_dev->kvm->lock);
-	kvm_put_kvm(assigned_dev->kvm);
 }
 
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
@@ -116,9 +122,11 @@
 	struct kvm_assigned_dev_kernel *assigned_dev =
 		(struct kvm_assigned_dev_kernel *) dev_id;
 
-	kvm_get_kvm(assigned_dev->kvm);
 	schedule_work(&assigned_dev->interrupt_work);
+	disable_irq_nosync(irq);
+	assigned_dev->host_irq_disabled = true;
+
 	return IRQ_HANDLED;
 }
 
@@ -132,25 +140,67 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
 	dev = container_of(kian, struct kvm_assigned_dev_kernel,
 			   ack_notifier);
+
 	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
-	enable_irq(dev->host_irq);
+
+	/* The guest irq may be shared so this ack may be
+	 * from another device.
+	 */
+	if (dev->host_irq_disabled) {
+		enable_irq(dev->host_irq);
+		dev->host_irq_disabled = false;
+	}
 }
 
-static void kvm_free_assigned_device(struct kvm *kvm,
-				     struct kvm_assigned_dev_kernel
-				     *assigned_dev)
+/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
+static void kvm_free_assigned_irq(struct kvm *kvm,
+				  struct kvm_assigned_dev_kernel *assigned_dev)
 {
-	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
-		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+	if (!irqchip_in_kernel(kvm))
+		return;
 
 	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
-	kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 
-	if (cancel_work_sync(&assigned_dev->interrupt_work))
-		/* We had pending work. That means we will have to take
-		 * care of kvm_put_kvm.
-		 */
-		kvm_put_kvm(kvm);
+	if (assigned_dev->irq_source_id != -1)
+		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+	assigned_dev->irq_source_id = -1;
+
+	if (!assigned_dev->irq_requested_type)
+		return;
+
+	/*
+	 * In kvm_free_assigned_irq, cancel_work_sync() returns true if:
+	 * 1. work is scheduled, and then cancelled.
+	 * 2. work callback is executed.
+	 *
+	 * The first case ensures that the irq is disabled and no more events
+	 * will happen. But in the second case, the irq may still be enabled
+	 * (e.g. for MSI). So we disable the irq here to prevent further events.
+	 *
+	 * Note this may result in a nested disable if the interrupt type is
+	 * INTx, but that is fine since we are about to free it.
+	 *
+	 * If this function is called as part of VM destruction, make sure
+	 * the kvm state is still valid at this point, since we may also
+	 * have to wait for interrupt_work to complete.
+	 */
+	disable_irq_nosync(assigned_dev->host_irq);
+	cancel_work_sync(&assigned_dev->interrupt_work);
+
+	free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+		pci_disable_msi(assigned_dev->dev);
+
+	assigned_dev->irq_requested_type = 0;
+}
+
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+				     struct kvm_assigned_dev_kernel
+				     *assigned_dev)
+{
+	kvm_free_assigned_irq(kvm, assigned_dev);
 
 	pci_reset_function(assigned_dev->dev);
 
@@ -176,12 +226,108 @@ void kvm_free_all_assigned_devices(struct kvm *kvm)
 	}
 }
 
+static int assigned_device_update_intx(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *adev,
+			struct kvm_assigned_irq *airq)
+{
+	adev->guest_irq = airq->guest_irq;
+	adev->ack_notifier.gsi = airq->guest_irq;
+
+	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
+		return 0;
+
+	if (irqchip_in_kernel(kvm)) {
+		if (!msi2intx &&
+		    (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
+			free_irq(adev->host_irq, (void *)adev);
+			pci_disable_msi(adev->dev);
+		}
+
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
+
+		if (airq->host_irq)
+			adev->host_irq = airq->host_irq;
+		else
+			adev->host_irq = adev->dev->irq;
+
+		/* Even though this is PCI, we don't want to use shared
+		 * interrupts. Sharing host devices with guest-assigned devices
+		 * on the same interrupt line is not a happy situation: there
+		 * are going to be long delays in accepting, acking, etc.
+		 */
+		if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
+				0, "kvm_assigned_intx_device", (void *)adev))
+			return -EIO;
+	}
+
+	adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
+				   KVM_ASSIGNED_DEV_HOST_INTX;
+	return 0;
+}
+
+#ifdef CONFIG_X86
+static int assigned_device_update_msi(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *adev,
+			struct kvm_assigned_irq *airq)
+{
+	int r;
+
+	adev->guest_irq = airq->guest_irq;
+	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
+		/* x86 doesn't care about the upper address of the guest MSI message addr */
+		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
+		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
+		adev->ack_notifier.gsi = -1;
+	} else if (msi2intx) {
+		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
+		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
+		adev->ack_notifier.gsi = airq->guest_irq;
+	} else {
+		/*
+		 * The guest requested that device MSI be disabled, so we
+		 * disable MSI and re-enable INTx by default. Note this only
+		 * applies to the non-msi2intx case.
+		 */
+		assigned_device_update_intx(kvm, adev, airq);
+		return 0;
+	}
+
+	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+		return 0;
+
+	if (irqchip_in_kernel(kvm)) {
+		if (!msi2intx) {
+			if (adev->irq_requested_type &
+					KVM_ASSIGNED_DEV_HOST_INTX)
+				free_irq(adev->host_irq, (void *)adev);
+
+			r = pci_enable_msi(adev->dev);
+			if (r)
+				return r;
+		}
+
+		adev->host_irq = adev->dev->irq;
+		if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
+				"kvm_assigned_msi_device", (void *)adev))
+			return -EIO;
+	}
+
+	if (!msi2intx)
+		adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;
+
+	adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
+	return 0;
+}
+#endif
+
 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 				   struct kvm_assigned_irq
 				   *assigned_irq)
 {
 	int r = 0;
 	struct kvm_assigned_dev_kernel *match;
+	u32 current_flags = 0, changed_flags;
 
 	mutex_lock(&kvm->lock);
 
@@ -192,49 +338,73 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 		return -EINVAL;
 	}
 
-	if (match->irq_requested) {
-		match->guest_irq = assigned_irq->guest_irq;
-		match->ack_notifier.gsi = assigned_irq->guest_irq;
-		mutex_unlock(&kvm->lock);
-		return 0;
+	if (!match->irq_requested_type) {
+		INIT_WORK(&match->interrupt_work,
+				kvm_assigned_dev_interrupt_work_handler);
+		if (irqchip_in_kernel(kvm)) {
+			/* Register ack notifier */
+			match->ack_notifier.gsi = -1;
+			match->ack_notifier.irq_acked =
+					kvm_assigned_dev_ack_irq;
+			kvm_register_irq_ack_notifier(kvm,
+					&match->ack_notifier);
+
+			/* Request IRQ source ID */
+			r = kvm_request_irq_source_id(kvm);
+			if (r < 0)
+				goto out_release;
+			else
+				match->irq_source_id = r;
+
+#ifdef CONFIG_X86
+			/* Determine the host device irq type; we can learn
+			 * the result from dev->msi_enabled */
+			if (msi2intx)
+				pci_enable_msi(match->dev);
+#endif
+		}
 	}
 
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
+	if ((match->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) &&
+	    (match->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI))
+		current_flags |= KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;
 
-	if (irqchip_in_kernel(kvm)) {
-		if (!capable(CAP_SYS_RAWIO)) {
-			r = -EPERM;
+	changed_flags = assigned_irq->flags ^ current_flags;
+
+	if ((changed_flags & KVM_DEV_IRQ_ASSIGN_MSI_ACTION) ||
+	    (msi2intx && match->dev->msi_enabled)) {
+#ifdef CONFIG_X86
+		r = assigned_device_update_msi(kvm, match, assigned_irq);
+		if (r) {
+			printk(KERN_WARNING "kvm: failed to enable "
+					"MSI device!\n");
 			goto out_release;
 		}
-
-		if (assigned_irq->host_irq)
-			match->host_irq = assigned_irq->host_irq;
-		else
-			match->host_irq = match->dev->irq;
-		match->guest_irq = assigned_irq->guest_irq;
-		match->ack_notifier.gsi = assigned_irq->guest_irq;
-		match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-		kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
-		r = kvm_request_irq_source_id(kvm);
-		if (r < 0)
+#else
+		r = -ENOTTY;
+#endif
+	} else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
+		/* Host device IRQ 0 means the device doesn't support INTx */
+		if (!msi2intx) {
+			printk(KERN_WARNING
+				"kvm: wait device to enable MSI!\n");
+			r = 0;
+		} else {
+			printk(KERN_WARNING
+				"kvm: failed to enable MSI device!\n");
+			r = -ENOTTY;
 			goto out_release;
-		else
-			match->irq_source_id = r;
-
-		/* Even though this is PCI, we don't want to use shared
-		 * interrupts. Sharing host devices with guest-assigned devices
-		 * on the same interrupt line is not a happy situation: there
-		 * are going to be long delays in accepting, acking, etc.
- */ - if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0, - "kvm_assigned_device", (void *)match)) { - r = -EIO; + } + } else { + /* Non-sharing INTx mode */ + r = assigned_device_update_intx(kvm, match, assigned_irq); + if (r) { + printk(KERN_WARNING "kvm: failed to enable " + "INTx device!\n"); goto out_release; } } - match->irq_requested = true; mutex_unlock(&kvm->lock); return r; out_release: @@ -250,6 +420,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *match; struct pci_dev *dev; + down_read(&kvm->slots_lock); mutex_lock(&kvm->lock); match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, @@ -291,20 +462,27 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, match->assigned_dev_id = assigned_dev->assigned_dev_id; match->host_busnr = assigned_dev->busnr; match->host_devfn = assigned_dev->devfn; + match->flags = assigned_dev->flags; match->dev = dev; - + match->irq_source_id = -1; match->kvm = kvm; list_add(&match->list, &kvm->arch.assigned_dev_head); if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { - r = kvm_iommu_map_guest(kvm, match); + if (!kvm->arch.iommu_domain) { + r = kvm_iommu_map_guest(kvm); + if (r) + goto out_list_del; + } + r = kvm_assign_device(kvm, match); if (r) goto out_list_del; } out: mutex_unlock(&kvm->lock); + up_read(&kvm->slots_lock); return r; out_list_del: list_del(&match->list); @@ -316,6 +494,36 @@ out_put: out_free: kfree(match); mutex_unlock(&kvm->lock); + up_read(&kvm->slots_lock); + return r; +} +#endif + +#ifdef KVM_CAP_DEVICE_DEASSIGNMENT +static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, + struct kvm_assigned_pci_dev *assigned_dev) +{ + int r = 0; + struct kvm_assigned_dev_kernel *match; + + mutex_lock(&kvm->lock); + + match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, + assigned_dev->assigned_dev_id); + if (!match) { + printk(KERN_INFO "%s: device hasn't been assigned before, " + "so cannot be deassigned\n", __func__); + r = -EINVAL; + goto out; + } + + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) + kvm_deassign_device(kvm, match); + + kvm_free_assigned_device(kvm, match); + +out: + mutex_unlock(&kvm->lock); return r; } #endif @@ -327,8 +535,10 @@ static inline int valid_vcpu(int n) inline int kvm_is_mmio_pfn(pfn_t pfn) { - if (pfn_valid(pfn)) - return PageReserved(pfn_to_page(pfn)); + if (pfn_valid(pfn)) { + struct page *page = compound_head(pfn_to_page(pfn)); + return PageReserved(page); + } return true; } @@ -360,57 +570,48 @@ static void ack_flush(void *_completed) { } -void kvm_flush_remote_tlbs(struct kvm *kvm) +static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) { int i, cpu, me; - cpumask_t cpus; + cpumask_var_t cpus; + bool called = true; struct kvm_vcpu *vcpu; + if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) + cpumask_clear(cpus); + me = get_cpu(); - cpus_clear(cpus); for (i = 0; i < KVM_MAX_VCPUS; ++i) { vcpu = kvm->vcpus[i]; if (!vcpu) continue; - if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) + if (test_and_set_bit(req, &vcpu->requests)) continue; cpu = vcpu->cpu; - if (cpu != -1 && cpu != me) - cpu_set(cpu, cpus); + if (cpus != NULL && cpu != -1 && cpu != me) + cpumask_set_cpu(cpu, cpus); } - if (cpus_empty(cpus)) - goto out; - ++kvm->stat.remote_tlb_flush; - smp_call_function_mask(cpus, ack_flush, NULL, 1); -out: + if (unlikely(cpus == NULL)) + smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); + else if (!cpumask_empty(cpus)) + smp_call_function_many(cpus, ack_flush, NULL, 1); + else + called = false; 
 	put_cpu();
+	free_cpumask_var(cpus);
+	return called;
 }
 
-void kvm_reload_remote_mmus(struct kvm *kvm)
+void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	int i, cpu, me;
-	cpumask_t cpus;
-	struct kvm_vcpu *vcpu;
-
-	me = get_cpu();
-	cpus_clear(cpus);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		vcpu = kvm->vcpus[i];
-		if (!vcpu)
-			continue;
-		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
-			continue;
-		cpu = vcpu->cpu;
-		if (cpu != -1 && cpu != me)
-			cpu_set(cpu, cpus);
-	}
-	if (cpus_empty(cpus))
-		goto out;
-	smp_call_function_mask(cpus, ack_flush, NULL, 1);
-out:
-	put_cpu();
+	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+		++kvm->stat.remote_tlb_flush;
 }
 
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
+}
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
@@ -558,11 +759,19 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 	return young;
 }
 
+static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
+				     struct mm_struct *mm)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	kvm_arch_flush_shadow(kvm);
+}
+
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.release		= kvm_mmu_notifier_release,
 };
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
@@ -575,6 +784,10 @@ static struct kvm *kvm_create_vm(void)
 
 	if (IS_ERR(kvm))
 		goto out;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	INIT_LIST_HEAD(&kvm->irq_routing);
+	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+#endif
 
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -652,9 +865,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
 {
 	struct mm_struct *mm = kvm->mm;
 
+	kvm_arch_sync_events(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
+	kvm_free_irq_routing(kvm);
 	kvm_io_bus_destroy(&kvm->pio_bus);
 	kvm_io_bus_destroy(&kvm->mmio_bus);
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -828,7 +1043,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
-	kvm_free_physmem_slot(&old, &new);
+	kvm_free_physmem_slot(&old, npages ? &new : NULL);
+	/* Slot deletion case: we have to update the current slot */
+	if (!npages)
+		*memslot = old;
 #ifdef CONFIG_DMAR
 	/* map the pages in iommu page table */
 	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
@@ -1303,7 +1521,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
 	.compat_ioctl   = kvm_vcpu_ioctl,
@@ -1498,13 +1716,13 @@ out_free2:
 		r = 0;
 		break;
 	}
-	case KVM_DEBUG_GUEST: {
-		struct kvm_debug_guest dbg;
+	case KVM_SET_GUEST_DEBUG: {
+		struct kvm_guest_debug dbg;
 
 		r = -EFAULT;
 		if (copy_from_user(&dbg, argp, sizeof dbg))
 			goto out;
-		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
+		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
 		if (r)
 			goto out;
 		r = 0;
@@ -1659,6 +1877,49 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+	case KVM_DEASSIGN_PCI_DEVICE: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
+#ifdef KVM_CAP_IRQ_ROUTING
+	case KVM_SET_GSI_ROUTING: {
+		struct kvm_irq_routing routing;
+		struct kvm_irq_routing __user *urouting;
+		struct kvm_irq_routing_entry *entries;
+
+		r = -EFAULT;
+		if (copy_from_user(&routing, argp, sizeof(routing)))
+			goto out;
+		r = -EINVAL;
+		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+			goto out;
+		if (routing.flags)
+			goto out;
+		r = -ENOMEM;
+		entries = vmalloc(routing.nr * sizeof(*entries));
+		if (!entries)
+			goto out;
+		r = -EFAULT;
+		urouting = argp;
+		if (copy_from_user(entries, urouting->entries,
+				   routing.nr * sizeof(*entries)))
+			goto out_free_irq_routing;
+		r = kvm_set_irq_routing(kvm, entries, routing.nr,
+					routing.flags);
+	out_free_irq_routing:
+		vfree(entries);
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -1697,7 +1958,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
 	.compat_ioctl   = kvm_vm_ioctl,
@@ -1719,6 +1980,22 @@ static int kvm_dev_ioctl_create_vm(void)
 	return fd;
 }
 
+static long kvm_dev_ioctl_check_extension_generic(long arg)
+{
+	switch (arg) {
+	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+		return 1;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	case KVM_CAP_IRQ_ROUTING:
+		return 1;
+#endif
+	default:
+		break;
+	}
+	return kvm_dev_ioctl_check_extension(arg);
+}
+
 static long kvm_dev_ioctl(struct file *filp,
 			  unsigned int ioctl, unsigned long arg)
 {
@@ -1738,7 +2015,7 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = kvm_dev_ioctl_create_vm();
 		break;
 	case KVM_CHECK_EXTENSION:
-		r = kvm_dev_ioctl_check_extension(arg);
+		r = kvm_dev_ioctl_check_extension_generic(arg);
 		break;
 	case KVM_GET_VCPU_MMAP_SIZE:
 		r = -EINVAL;
@@ -1779,9 +2056,9 @@ static void hardware_enable(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
-	if (cpu_isset(cpu, cpus_hardware_enabled))
+	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
 		return;
-	cpu_set(cpu, cpus_hardware_enabled);
+	cpumask_set_cpu(cpu, cpus_hardware_enabled);
 	kvm_arch_hardware_enable(NULL);
 }
 
@@ -1789,9 +2066,9 @@ static void hardware_disable(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, cpus_hardware_enabled))
+	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
 		return;
-	cpu_clear(cpu, cpus_hardware_enabled);
+	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
 	kvm_arch_hardware_disable(NULL);
 }
 
@@ -2025,9 +2302,14 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 
 	bad_pfn = page_to_pfn(bad_page);
 
+	if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
 	r = kvm_arch_hardware_setup();
 	if (r < 0)
-		goto out_free_0;
+		goto out_free_0a;
 
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu,
@@ -2061,6 +2343,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	}
 
 	kvm_chardev_ops.owner = module;
+	kvm_vm_fops.owner = module;
+	kvm_vcpu_fops.owner = module;
 
 	r = misc_register(&kvm_dev);
 	if (r) {
@@ -2070,6 +2354,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
 
+#ifndef CONFIG_X86
+	msi2intx = 0;
+#endif
 	return 0;
 
 @@ -2086,6 +2373,8 @@ out_free_2:
 	on_each_cpu(hardware_disable, NULL, 1);
 out_free_1:
 	kvm_arch_hardware_unsetup();
+out_free_0a:
+	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
 	__free_page(bad_page);
 out:
@@ -2109,6 +2398,7 @@ void kvm_exit(void)
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	kvm_exit_debug();
+	free_cpumask_var(cpus_hardware_enabled);
 	__free_page(bad_page);
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
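
A note on the cpumask conversion above: the patch folds the two open-coded IPI loops (kvm_flush_remote_tlbs() and kvm_reload_remote_mmus()) into a single make_all_cpus_request() helper, and switches both cpus_hardware_enabled and the helper's local mask from cpumask_t to cpumask_var_t, so CONFIG_CPUMASK_OFFSTACK builds with large NR_CPUS no longer burn stack. The core of the new helper is reproduced below; the code is taken from the hunks above, only the explanatory comments are new:

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	/* With CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated, so this
	 * GFP_ATOMIC allocation may fail; cpus stays NULL in that case. */
	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
		cpumask_clear(cpus);

	me = get_cpu();		/* disable preemption; never IPI ourselves */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(req, &vcpu->requests))
			continue;	/* request was already pending */
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		/* Allocation failed: broadcast to every online CPU.
		 * Correct, just slower than the targeted IPI. */
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;		/* no running vcpu needed an IPI */
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

The return value lets kvm_flush_remote_tlbs() bump stat.remote_tlb_flush only when at least one CPU was actually notified, which the old goto-based loop never tracked.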
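
The assigned-device hunks also rework the interrupt-forwarding handshake: the hard irq handler now masks the host line and defers injection to a work item, and the line is unmasked either by the work handler (for MSI) or by the guest's EOI arriving through the ack notifier (for INTx). The following is a reduced model of that flow, not the kernel's code verbatim: struct adev stands in for struct kvm_assigned_dev_kernel, and inject_guest_irq() is a hypothetical stand-in for the kvm_set_irq() plumbing and its locking.

struct adev {
	int host_irq;
	bool host_irq_disabled;            /* field added by this patch */
	unsigned long irq_requested_type;  /* HOST/GUEST x INTX/MSI flags */
	struct work_struct interrupt_work;
};

/* Hard irq context: mask the host line, defer injection to process context. */
static irqreturn_t assigned_dev_intr(int irq, void *dev_id)
{
	struct adev *dev = dev_id;

	schedule_work(&dev->interrupt_work);
	disable_irq_nosync(irq);           /* no re-fire until re-enabled */
	dev->host_irq_disabled = true;
	return IRQ_HANDLED;
}

/* Work handler: inject; MSI is edge-style, so re-enable immediately. */
static void interrupt_work_handler(struct work_struct *work)
{
	struct adev *dev = container_of(work, struct adev, interrupt_work);

	inject_guest_irq(dev, 1);          /* stand-in for kvm_set_irq(.., 1) */
	if (dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
}

/* INTx: only the guest's ack (EOI) unmasks the host line. The guard is
 * needed because the guest irq may be shared, so this ack may really
 * belong to another device. */
static void assigned_dev_ack(struct adev *dev)
{
	inject_guest_irq(dev, 0);          /* stand-in for kvm_set_irq(.., 0) */
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
}

This is also why kvm_free_assigned_irq() calls disable_irq_nosync() before cancel_work_sync(): if the work item already ran for an MSI device, it re-enabled the line, so a fresh disable is needed before free_irq().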
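
Finally, the KVM_SET_GSI_ROUTING case added to kvm_vm_ioctl() is driven from userspace roughly as follows. This is an illustrative sketch rather than code from the patch: it assumes a kernel that reports KVM_CAP_IRQ_ROUTING and a VM with an in-kernel irqchip (KVM_CREATE_IRQCHIP), and it routes GSI 5 to pin 5 of the master PIC. Error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd, vm_fd;
	struct kvm_irq_routing *table;
	struct kvm_irq_routing_entry *e;

	kvm_fd = open("/dev/kvm", O_RDWR);
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);

	/* Header plus one entry; entries[] is a flexible array, so the
	 * whole table is allocated in one block, matching the single
	 * copy_from_user() of nr entries in the ioctl above. */
	table = calloc(1, sizeof(*table) + sizeof(*e));
	table->nr = 1;
	table->flags = 0;                  /* must be zero, see the ioctl */

	e = &table->entries[0];
	e->gsi = 5;
	e->type = KVM_IRQ_ROUTING_IRQCHIP;
	e->u.irqchip.irqchip = KVM_IRQCHIP_PIC_MASTER;
	e->u.irqchip.pin = 5;

	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, table) < 0)
		perror("KVM_SET_GSI_ROUTING");

	free(table);
	return 0;
}

Note that the ioctl replaces the whole routing table each time, which is why the kernel side copies all routing.nr entries in one shot and rejects nr >= KVM_MAX_IRQ_ROUTES up front.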