#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_X86
+#include <asm/msidef.h>
+#endif
+
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "irq.h"
+#endif
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int msi2intx = 1;
+module_param(msi2intx, bool, 0);
+
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
-static cpumask_t cpus_hardware_enabled;
+static cpumask_var_t cpus_hardware_enabled;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-bool kvm_rebooting;
+static bool kvm_rebooting;
+
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+
+#ifdef CONFIG_X86
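+/*
+ * Decode the guest-programmed MSI address/data (destination id, vector,
+ * destination/trigger/delivery mode) and inject the interrupt through
+ * the in-kernel ioapic/lapic delivery logic.
+ */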
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
+{
+ int vcpu_id;
+ struct kvm_vcpu *vcpu;
+ struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
+ int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+ >> MSI_ADDR_DEST_ID_SHIFT;
+ int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
+ >> MSI_DATA_VECTOR_SHIFT;
+ int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+ (unsigned long *)&dev->guest_msi.address_lo);
+ int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+ (unsigned long *)&dev->guest_msi.data);
+ int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+ (unsigned long *)&dev->guest_msi.data);
+ u32 deliver_bitmask;
+
+ BUG_ON(!ioapic);
+
+ deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+ dest_id, dest_mode);
+ /* IOAPIC delivery mode value is the same as MSI here */
+ switch (delivery_mode) {
+ case IOAPIC_LOWEST_PRIORITY:
+ vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+ deliver_bitmask);
+ if (vcpu != NULL)
+ kvm_apic_set_irq(vcpu, vector, trig_mode);
+ else
+ printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
+ break;
+ case IOAPIC_FIXED:
+ for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+ if (!(deliver_bitmask & (1 << vcpu_id)))
+ continue;
+ deliver_bitmask &= ~(1 << vcpu_id);
+ vcpu = ioapic->kvm->vcpus[vcpu_id];
+ if (vcpu)
+ kvm_apic_set_irq(vcpu, vector, trig_mode);
+ }
+ break;
+ default:
+ printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
+ }
+}
+#else
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
+#endif
+
+static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
+ int assigned_dev_id)
+{
+ struct list_head *ptr;
+ struct kvm_assigned_dev_kernel *match;
+
+ list_for_each(ptr, head) {
+ match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+ if (match->assigned_dev_id == assigned_dev_id)
+ return match;
+ }
+ return NULL;
+}
+
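+/*
+ * Deferred work scheduled by kvm_assigned_dev_intr(): runs in process
+ * context so the guest interrupt can be injected under kvm->lock.
+ */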
+static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev;
+
+ assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
+ interrupt_work);
+
+ /* The kvm->lock mutex is taken to safely inject the irq into the
+ * guest. When the interrupt injection (or the ioapic code) uses a
+ * finer-grained lock, update this.
+ */
+ mutex_lock(&assigned_dev->kvm->lock);
+ if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
+ kvm_set_irq(assigned_dev->kvm,
+ assigned_dev->irq_source_id,
+ assigned_dev->guest_irq, 1);
+ else if (assigned_dev->irq_requested_type &
+ KVM_ASSIGNED_DEV_GUEST_MSI) {
+ assigned_device_msi_dispatch(assigned_dev);
+ enable_irq(assigned_dev->host_irq);
+ assigned_dev->host_irq_disabled = false;
+ }
+ mutex_unlock(&assigned_dev->kvm->lock);
+}
+
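+/*
+ * Host-side irq handler for an assigned device: defer the actual guest
+ * injection to a workqueue and keep the host line disabled until the
+ * guest acks (INTx) or the work handler re-enables it (MSI).
+ */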
+static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev =
+ (struct kvm_assigned_dev_kernel *) dev_id;
+
+ schedule_work(&assigned_dev->interrupt_work);
+
+ disable_irq_nosync(irq);
+ assigned_dev->host_irq_disabled = true;
+
+ return IRQ_HANDLED;
+}
+
+/* Ack the irq line for an assigned device */
+static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+ struct kvm_assigned_dev_kernel *dev;
+
+ if (kian->gsi == -1)
+ return;
+
+ dev = container_of(kian, struct kvm_assigned_dev_kernel,
+ ack_notifier);
+
+ kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+
+ /* The guest irq may be shared so this ack may be
+ * from another device.
+ */
+ if (dev->host_irq_disabled) {
+ enable_irq(dev->host_irq);
+ dev->host_irq_disabled = false;
+ }
+}
+
+/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
+static void kvm_free_assigned_irq(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
+{
+ if (!irqchip_in_kernel(kvm))
+ return;
+
+ kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
+
+ if (assigned_dev->irq_source_id != -1)
+ kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+ assigned_dev->irq_source_id = -1;
+
+ if (!assigned_dev->irq_requested_type)
+ return;
+
+ /*
+ * Here, cancel_work_sync() returns true if:
+ * 1. the work was scheduled and then cancelled, or
+ * 2. the work callback was executed.
+ *
+ * The first case ensures that the irq is disabled and no further
+ * events can arrive. In the second case, however, the irq may have
+ * been re-enabled (e.g. for MSI), so we disable it here to prevent
+ * further events.
+ *
+ * Note that this may result in a nested disable if the interrupt
+ * type is INTx, but that is fine since we are about to free it.
+ *
+ * If this function is called as part of VM destruction, make sure
+ * the kvm state is still valid at this point, since we may also
+ * have to wait for interrupt_work to complete.
+ */
+ disable_irq_nosync(assigned_dev->host_irq);
+ cancel_work_sync(&assigned_dev->interrupt_work);
+
+ free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+ if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+ pci_disable_msi(assigned_dev->dev);
+
+ assigned_dev->irq_requested_type = 0;
+}
+
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel
+ *assigned_dev)
+{
+ kvm_free_assigned_irq(kvm, assigned_dev);
+
+ pci_reset_function(assigned_dev->dev);
+
+ pci_release_regions(assigned_dev->dev);
+ pci_disable_device(assigned_dev->dev);
+ pci_dev_put(assigned_dev->dev);
+
+ list_del(&assigned_dev->list);
+ kfree(assigned_dev);
+}
+
+void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+ struct list_head *ptr, *ptr2;
+ struct kvm_assigned_dev_kernel *assigned_dev;
+
+ list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+ assigned_dev = list_entry(ptr,
+ struct kvm_assigned_dev_kernel,
+ list);
+
+ kvm_free_assigned_device(kvm, assigned_dev);
+ }
+}
+
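+/*
+ * Switch an assigned device to host/guest INTx delivery, tearing down a
+ * previously enabled host MSI when msi2intx is disabled.
+ */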
+static int assigned_device_update_intx(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *adev,
+ struct kvm_assigned_irq *airq)
+{
+ adev->guest_irq = airq->guest_irq;
+ adev->ack_notifier.gsi = airq->guest_irq;
+
+ if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
+ return 0;
+
+ if (irqchip_in_kernel(kvm)) {
+ if (!msi2intx &&
+ (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
+ free_irq(adev->host_irq, (void *)adev);
+ pci_disable_msi(adev->dev);
+ }
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (airq->host_irq)
+ adev->host_irq = airq->host_irq;
+ else
+ adev->host_irq = adev->dev->irq;
+
+ /* Even though this is PCI, we don't want to use shared
+ * interrupts. Sharing host devices with guest-assigned devices
+ * on the same interrupt line is not a happy situation: there
+ * are going to be long delays in accepting, acking, etc.
+ */
+ if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
+ 0, "kvm_assigned_intx_device", (void *)adev))
+ return -EIO;
+ }
+
+ adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
+ KVM_ASSIGNED_DEV_HOST_INTX;
+ return 0;
+}
+
+#ifdef CONFIG_X86
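+/*
+ * Switch an assigned device to MSI delivery: record the guest MSI
+ * address/data (or fall back to INTx injection when msi2intx is set)
+ * and enable host MSI plus the corresponding irq handler.
+ */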
+static int assigned_device_update_msi(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *adev,
+ struct kvm_assigned_irq *airq)
+{
+ int r;
+
+ if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
+ /* x86 does not care about the upper address of the guest MSI message */
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
+ adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
+ adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
+ adev->guest_msi.data = airq->guest_msi.data;
+ adev->ack_notifier.gsi = -1;
+ } else if (msi2intx) {
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
+ adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
+ adev->guest_irq = airq->guest_irq;
+ adev->ack_notifier.gsi = airq->guest_irq;
+ }
+
+ if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+ return 0;
+
+ if (irqchip_in_kernel(kvm)) {
+ if (!msi2intx) {
+ if (adev->irq_requested_type &
+ KVM_ASSIGNED_DEV_HOST_INTX)
+ free_irq(adev->host_irq, (void *)adev);
+
+ r = pci_enable_msi(adev->dev);
+ if (r)
+ return r;
+ }
+
+ adev->host_irq = adev->dev->irq;
+ if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
+ "kvm_assigned_msi_device", (void *)adev))
+ return -EIO;
+ }
+
+ if (!msi2intx)
+ adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;
+
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
+ return 0;
+}
+#endif
+
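+/*
+ * KVM_ASSIGN_IRQ: set up interrupt delivery for an already assigned
+ * device, registering the ack notifier and irq source id on first use
+ * and then selecting MSI or INTx mode.
+ */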
+static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
+ struct kvm_assigned_irq
+ *assigned_irq)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_irq->assigned_dev_id);
+ if (!match) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
+
+ if (!match->irq_requested_type) {
+ INIT_WORK(&match->interrupt_work,
+ kvm_assigned_dev_interrupt_work_handler);
+ if (irqchip_in_kernel(kvm)) {
+ /* Register ack notifier */
+ match->ack_notifier.gsi = -1;
+ match->ack_notifier.irq_acked =
+ kvm_assigned_dev_ack_irq;
+ kvm_register_irq_ack_notifier(kvm,
+ &match->ack_notifier);
+
+ /* Request IRQ source ID */
+ r = kvm_request_irq_source_id(kvm);
+ if (r < 0)
+ goto out_release;
+ else
+ match->irq_source_id = r;
+
+#ifdef CONFIG_X86
+ /* Determine the host device irq type; the result can be
+ * read from dev->msi_enabled */
+ if (msi2intx)
+ pci_enable_msi(match->dev);
+#endif
+ }
+ }
+
+ if ((!msi2intx &&
+ (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
+ (msi2intx && match->dev->msi_enabled)) {
+#ifdef CONFIG_X86
+ r = assigned_device_update_msi(kvm, match, assigned_irq);
+ if (r) {
+ printk(KERN_WARNING "kvm: failed to enable "
+ "MSI device!\n");
+ goto out_release;
+ }
+#else
+ r = -ENOTTY;
+#endif
+ } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
+ /* Host device IRQ 0 means the device does not support INTx */
+ if (!msi2intx) {
+ printk(KERN_WARNING
+ "kvm: wait device to enable MSI!\n");
+ r = 0;
+ } else {
+ printk(KERN_WARNING
+ "kvm: failed to enable MSI device!\n");
+ r = -ENOTTY;
+ goto out_release;
+ }
+ } else {
+ /* Non-sharing INTx mode */
+ r = assigned_device_update_intx(kvm, match, assigned_irq);
+ if (r) {
+ printk(KERN_WARNING "kvm: failed to enable "
+ "INTx device!\n");
+ goto out_release;
+ }
+ }
+
+ mutex_unlock(&kvm->lock);
+ return r;
+out_release:
+ mutex_unlock(&kvm->lock);
+ kvm_free_assigned_device(kvm, match);
+ return r;
+}
+
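+/*
+ * KVM_ASSIGN_PCI_DEVICE: claim the host PCI device (enable it, request
+ * its regions, reset it), track it on kvm->arch.assigned_dev_head and
+ * optionally map it through the IOMMU.
+ */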
+static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+ struct pci_dev *dev;
+
+ down_read(&kvm->slots_lock);
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_dev->assigned_dev_id);
+ if (match) {
+ /* device already assigned */
+ r = -EINVAL;
+ goto out;
+ }
+
+ match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
+ if (match == NULL) {
+ printk(KERN_INFO "%s: Couldn't allocate memory\n",
+ __func__);
+ r = -ENOMEM;
+ goto out;
+ }
+ dev = pci_get_bus_and_slot(assigned_dev->busnr,
+ assigned_dev->devfn);
+ if (!dev) {
+ printk(KERN_INFO "%s: host device not found\n", __func__);
+ r = -EINVAL;
+ goto out_free;
+ }
+ if (pci_enable_device(dev)) {
+ printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+ r = -EBUSY;
+ goto out_put;
+ }
+ r = pci_request_regions(dev, "kvm_assigned_device");
+ if (r) {
+ printk(KERN_INFO "%s: Could not get access to device regions\n",
+ __func__);
+ goto out_disable;
+ }
+
+ pci_reset_function(dev);
+
+ match->assigned_dev_id = assigned_dev->assigned_dev_id;
+ match->host_busnr = assigned_dev->busnr;
+ match->host_devfn = assigned_dev->devfn;
+ match->flags = assigned_dev->flags;
+ match->dev = dev;
+ match->irq_source_id = -1;
+ match->kvm = kvm;
+
+ list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+ if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+ if (!kvm->arch.iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
+ if (r)
+ goto out_list_del;
+ }
+ r = kvm_assign_device(kvm, match);
+ if (r)
+ goto out_list_del;
+ }
+
+out:
+ mutex_unlock(&kvm->lock);
+ up_read(&kvm->slots_lock);
+ return r;
+out_list_del:
+ list_del(&match->list);
+ pci_release_regions(dev);
+out_disable:
+ pci_disable_device(dev);
+out_put:
+ pci_dev_put(dev);
+out_free:
+ kfree(match);
+ mutex_unlock(&kvm->lock);
+ up_read(&kvm->slots_lock);
+ return r;
+}
+#endif
+
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
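+/*
+ * KVM_DEASSIGN_PCI_DEVICE: detach the device from the IOMMU (if it was
+ * mapped) and release its irq, PCI resources and tracking structure.
+ */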
+static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_dev->assigned_dev_id);
+ if (!match) {
+ printk(KERN_INFO "%s: device hasn't been assigned before, "
+ "so cannot be deassigned\n", __func__);
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+ kvm_deassign_device(kvm, match);
+
+ kvm_free_assigned_device(kvm, match);
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+#endif
static inline int valid_vcpu(int n)
{
return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
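+/*
+ * A pfn is treated as MMIO if it has no valid struct page or its page is
+ * marked reserved; the release/dirty/accessed helpers below skip such pfns.
+ */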
+inline int kvm_is_mmio_pfn(pfn_t pfn)
+{
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn));
+
+ return true;
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
{
}
-void kvm_flush_remote_tlbs(struct kvm *kvm)
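+/*
+ * Set a request bit on every vcpu and IPI the cpus currently running
+ * one; fall back to a broadcast when the temporary cpumask cannot be
+ * allocated. Returns whether any cpus were kicked.
+ */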
+static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
- int i, cpu;
- cpumask_t cpus;
+ int i, cpu, me;
+ cpumask_var_t cpus;
+ bool called = true;
struct kvm_vcpu *vcpu;
- cpus_clear(cpus);
+ if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
+ cpumask_clear(cpus);
+
+ me = get_cpu();
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
if (!vcpu)
continue;
- if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+ if (test_and_set_bit(req, &vcpu->requests))
continue;
cpu = vcpu->cpu;
- if (cpu != -1 && cpu != raw_smp_processor_id())
- cpu_set(cpu, cpus);
+ if (cpus != NULL && cpu != -1 && cpu != me)
+ cpumask_set_cpu(cpu, cpus);
}
- if (cpus_empty(cpus))
- return;
- ++kvm->stat.remote_tlb_flush;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
+ if (unlikely(cpus == NULL))
+ smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+ else if (!cpumask_empty(cpus))
+ smp_call_function_many(cpus, ack_flush, NULL, 1);
+ else
+ called = false;
+ put_cpu();
+ free_cpumask_var(cpus);
+ return called;
}
-void kvm_reload_remote_mmus(struct kvm *kvm)
+void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- int i, cpu;
- cpumask_t cpus;
- struct kvm_vcpu *vcpu;
-
- cpus_clear(cpus);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- continue;
- cpu = vcpu->cpu;
- if (cpu != -1 && cpu != raw_smp_processor_id())
- cpu_set(cpu, cpus);
- }
- if (cpus_empty(cpus))
- return;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
+ if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+ ++kvm->stat.remote_tlb_flush;
}
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+ make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
+}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct kvm, mmu_notifier);
+}
+
+static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush;
+
+ /*
+ * When ->invalidate_page runs, the linux pte has been zapped
+ * already but the page is still allocated until
+ * ->invalidate_page returns. So if we increase the sequence
+ * here the kvm page fault will notice if the spte can't be
+ * established because the page is going to be freed. If
+ * instead the kvm page fault establishes the spte before
+ * ->invalidate_page runs, kvm_unmap_hva will release it
+ * before returning.
+ *
+ * The sequence increase only needs to be seen at spin_unlock
+ * time, not at spin_lock time.
+ *
+ * Increasing the sequence after the spin_unlock would be
+ * unsafe because the kvm page fault could then establish the
+ * pte after kvm_unmap_hva returned, without noticing the page
+ * is going to be freed.
+ */
+ spin_lock(&kvm->mmu_lock);
+ kvm->mmu_notifier_seq++;
+ need_tlb_flush = kvm_unmap_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we have to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+
+}
+
+static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush = 0;
+
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * The count increase must become visible at unlock time as no
+ * spte can be established without taking the mmu_lock and
+ * count is also read inside the mmu_lock critical section.
+ */
+ kvm->mmu_notifier_count++;
+ for (; start < end; start += PAGE_SIZE)
+ need_tlb_flush |= kvm_unmap_hva(kvm, start);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we have to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * This sequence increase will notify the kvm page fault that
+ * the page that is going to be mapped in the spte could have
+ * been freed.
+ */
+ kvm->mmu_notifier_seq++;
+ /*
+ * The above sequence increase must be visible before the
+ * below count decrease, but both values are read by the kvm
+ * page fault under the mmu_lock spinlock, so we do not need
+ * an smp_wmb() here in between the two.
+ */
+ kvm->mmu_notifier_count--;
+ spin_unlock(&kvm->mmu_lock);
+
+ BUG_ON(kvm->mmu_notifier_count < 0);
+}
+
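+/*
+ * mmu notifier aging callback: query and clear the accessed state of any
+ * sptes mapping this hva so the host can make page reclaim decisions.
+ */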
+static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int young;
+
+ spin_lock(&kvm->mmu_lock);
+ young = kvm_age_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ if (young)
+ kvm_flush_remote_tlbs(kvm);
+
+ return young;
+}
+
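+/* The address space is going away: drop all shadow page table mappings. */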
+static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ kvm_arch_flush_shadow(kvm);
+}
+
+static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .invalidate_page = kvm_mmu_notifier_invalidate_page,
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+ .release = kvm_mmu_notifier_release,
+};
+#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+
static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kvm_arch_create_vm();
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ {
+ int err;
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (err) {
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ put_page(page);
+#endif
+ kfree(kvm);
+ return ERR_PTR(err);
+ }
+ }
+#endif
+
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
{
struct mm_struct *mm = kvm->mm;
+ kvm_arch_sync_events(kvm);
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
if (kvm->coalesced_mmio_ring != NULL)
free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+#endif
kvm_arch_destroy_vm(kvm);
mmdrop(mm);
}
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
+ if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
+ goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
r = -ENOMEM;
/* Allocate if a slot is being created */
+#ifndef CONFIG_S390
if (npages && !new.rmap) {
new.rmap = vmalloc(npages * sizeof(struct page *));
memset(new.rmap, 0, npages * sizeof(*new.rmap));
new.user_alloc = user_alloc;
- new.userspace_addr = mem->userspace_addr;
+ /*
+ * hva_to_rmmap() serializes with the mmu_lock and to be
+ * safe it has to ignore memslots with !user_alloc &&
+ * !userspace_addr.
+ */
+ if (user_alloc)
+ new.userspace_addr = mem->userspace_addr;
+ else
+ new.userspace_addr = 0;
}
if (npages && !new.lpage_info) {
int largepages = npages / KVM_PAGES_PER_HPAGE;
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
}
+#endif /* not defined CONFIG_S390 */
+ if (!npages)
+ kvm_arch_flush_shadow(kvm);
+
+ spin_lock(&kvm->mmu_lock);
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
*memslot = new;
+ spin_unlock(&kvm->mmu_lock);
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
if (r) {
+ spin_lock(&kvm->mmu_lock);
*memslot = old;
+ spin_unlock(&kvm->mmu_lock);
goto out_free;
}
- kvm_free_physmem_slot(&old, &new);
+ kvm_free_physmem_slot(&old, npages ? &new : NULL);
+ /* Slot deletion case: we have to update the current slot */
+ if (!npages)
+ *memslot = old;
+#ifdef CONFIG_DMAR
+ /* map the pages into the iommu page table */
+ r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+ if (r)
+ goto out;
+#endif
return 0;
out_free:
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
}
return NULL;
}
+EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
gfn = unalias_gfn(kvm, gfn);
- return __gfn_to_memslot(kvm, gfn);
+ return gfn_to_memslot_unaliased(kvm, gfn);
}
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
struct kvm_memory_slot *slot;
gfn = unalias_gfn(kvm, gfn);
- slot = __gfn_to_memslot(kvm, gfn);
+ slot = gfn_to_memslot_unaliased(kvm, gfn);
if (!slot)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-/*
- * Requires current->mm->mmap_sem to be held
- */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
return page_to_pfn(bad_page);
}
- npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
- NULL);
+ npages = get_user_pages_fast(addr, 1, 1, page);
if (unlikely(npages != 1)) {
struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
+
if (vma == NULL || addr < vma->vm_start ||
!(vma->vm_flags & VM_PFNMAP)) {
+ up_read(&current->mm->mmap_sem);
get_page(bad_page);
return page_to_pfn(bad_page);
}
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- BUG_ON(pfn_valid(pfn));
+ up_read(&current->mm->mmap_sem);
+ BUG_ON(!kvm_is_mmio_pfn(pfn));
} else
pfn = page_to_pfn(page[0]);
pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
return pfn_to_page(pfn);
- WARN_ON(!pfn_valid(pfn));
+ WARN_ON(kvm_is_mmio_pfn(pfn));
get_page(bad_page);
return bad_page;
void kvm_release_pfn_clean(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
void kvm_set_pfn_dirty(pfn_t pfn)
{
- if (pfn_valid(pfn)) {
+ if (!kvm_is_mmio_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
void kvm_set_pfn_accessed(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
void kvm_get_pfn(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
struct kvm_memory_slot *memslot;
gfn = unalias_gfn(kvm, gfn);
- memslot = __gfn_to_memslot(kvm, gfn);
+ memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_cpu_has_interrupt(vcpu))
- break;
- if (kvm_cpu_has_pending_timer(vcpu))
- break;
- if (kvm_arch_vcpu_runnable(vcpu))
+ if (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_cpu_has_pending_timer(vcpu) ||
+ kvm_arch_vcpu_runnable(vcpu)) {
+ set_bit(KVM_REQ_UNHALT, &vcpu->requests);
break;
+ }
if (signal_pending(current))
break;
return 0;
}
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.compat_ioctl = kvm_vcpu_ioctl,
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
+ int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
if (fd < 0)
kvm_put_kvm(vcpu->kvm);
return fd;
r = kvm_arch_vcpu_setup(vcpu);
if (r)
- goto vcpu_destroy;
+ return r;
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
- mutex_unlock(&kvm->lock);
goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
- mutex_unlock(&kvm->lock);
vcpu_destroy:
+ mutex_unlock(&kvm->lock);
kvm_arch_vcpu_destroy(vcpu);
return r;
}
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
+ struct kvm_fpu *fpu = NULL;
+ struct kvm_sregs *kvm_sregs = NULL;
if (vcpu->kvm->mm != current->mm)
return -EIO;
break;
}
case KVM_GET_SREGS: {
- struct kvm_sregs kvm_sregs;
-
- memset(&kvm_sregs, 0, sizeof kvm_sregs);
- r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!kvm_sregs)
+ goto out;
+ r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
+ if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
goto out;
r = 0;
break;
}
case KVM_SET_SREGS: {
- struct kvm_sregs kvm_sregs;
-
+ kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!kvm_sregs)
+ goto out;
r = -EFAULT;
- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
+ if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
goto out;
- r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
+ r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = 0;
break;
}
case KVM_GET_FPU: {
- struct kvm_fpu fpu;
-
- memset(&fpu, 0, sizeof fpu);
- r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!fpu)
+ goto out;
+ r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &fpu, sizeof fpu))
+ if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
goto out;
r = 0;
break;
}
case KVM_SET_FPU: {
- struct kvm_fpu fpu;
-
+ fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!fpu)
+ goto out;
r = -EFAULT;
- if (copy_from_user(&fpu, argp, sizeof fpu))
+ if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
goto out;
- r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
+ r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
if (r)
goto out;
r = 0;
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
out:
+ kfree(fpu);
+ kfree(kvm_sregs);
return r;
}
break;
}
#endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+ case KVM_ASSIGN_PCI_DEVICE: {
+ struct kvm_assigned_pci_dev assigned_dev;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+ goto out;
+ r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
+ if (r)
+ goto out;
+ break;
+ }
+ case KVM_ASSIGN_IRQ: {
+ struct kvm_assigned_irq assigned_irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+ goto out;
+ r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
+ if (r)
+ goto out;
+ break;
+ }
+#endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+ case KVM_DEASSIGN_PCI_DEVICE: {
+ struct kvm_assigned_pci_dev assigned_dev;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+ goto out;
+ r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
+ if (r)
+ goto out;
+ break;
+ }
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
+ struct page *page[1];
+ unsigned long addr;
+ int npages;
+ gfn_t gfn = vmf->pgoff;
struct kvm *kvm = vma->vm_file->private_data;
- struct page *page;
- if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
return VM_FAULT_SIGBUS;
- page = gfn_to_page(kvm, vmf->pgoff);
- if (is_error_page(page)) {
- kvm_release_page_clean(page);
+
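+ /* Pin the page backing the gfn's host virtual address. */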
+ npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
+ NULL);
+ if (unlikely(npages != 1))
return VM_FAULT_SIGBUS;
- }
- vmf->page = page;
+
+ vmf->page = page[0];
return 0;
}
return 0;
}
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.compat_ioctl = kvm_vm_ioctl,
kvm = kvm_create_vm();
if (IS_ERR(kvm))
return PTR_ERR(kvm);
- fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
+ fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
if (fd < 0)
kvm_put_kvm(kvm);
return fd;
}
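+/*
+ * Capabilities common to all architectures are answered here; everything
+ * else is forwarded to kvm_dev_ioctl_check_extension().
+ */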
+static long kvm_dev_ioctl_check_extension_generic(long arg)
+{
+ switch (arg) {
+ case KVM_CAP_USER_MEMORY:
+ case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+ return 1;
+ default:
+ break;
+ }
+ return kvm_dev_ioctl_check_extension(arg);
+}
+
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
r = kvm_dev_ioctl_create_vm();
break;
case KVM_CHECK_EXTENSION:
- r = kvm_dev_ioctl_check_extension(arg);
+ r = kvm_dev_ioctl_check_extension_generic(arg);
break;
case KVM_GET_VCPU_MMAP_SIZE:
r = -EINVAL;
{
int cpu = raw_smp_processor_id();
- if (cpu_isset(cpu, cpus_hardware_enabled))
+ if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
- cpu_set(cpu, cpus_hardware_enabled);
+ cpumask_set_cpu(cpu, cpus_hardware_enabled);
kvm_arch_hardware_enable(NULL);
}
{
int cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, cpus_hardware_enabled))
+ if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
- cpu_clear(cpu, cpus_hardware_enabled);
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
kvm_arch_hardware_disable(NULL);
}
bad_pfn = page_to_pfn(bad_page);
+ if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
+ r = -ENOMEM;
+ goto out_free_0;
+ }
+
r = kvm_arch_hardware_setup();
if (r < 0)
- goto out_free_0;
+ goto out_free_0a;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
}
kvm_chardev_ops.owner = module;
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
r = misc_register(&kvm_dev);
if (r) {
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
+#ifndef CONFIG_X86
+ msi2intx = 0;
+#endif
return 0;
on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
kvm_arch_hardware_unsetup();
+out_free_0a:
+ free_cpumask_var(cpus_hardware_enabled);
out_free_0:
__free_page(bad_page);
out:
kvm_arch_hardware_unsetup();
kvm_arch_exit();
kvm_exit_debug();
+ free_cpumask_var(cpus_hardware_enabled);
__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);