	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

-static int coalesced_mmio_in_range(struct kvm_io_device *this,
-				   gpa_t addr, int len, int is_write)
+static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
+				   gpa_t addr, int len)
{
-	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

-	if (!is_write)
-		return 0;
-
	/* Are we able to batch it ? */

	/* last is the first free entry;
	 * check that we don't meet the first used entry:
	 * there is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0)
		return 0;	/* ring full, cannot batch */

	/* is (addr, len) fully contained in a registered zone ? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}

	return 0;
}
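For intuition, the availability check relies on unsigned wraparound: when first <= last, ring->first - ring->last - 1 underflows, and the modulo folds it back into range. A standalone sketch of that arithmetic; the ring size here is an illustrative power of two, chosen so the wraparound and the modulo interact cleanly (the real KVM_COALESCED_MMIO_MAX is derived from PAGE_SIZE):

#include <stdio.h>

#define RING_MAX 64u	/* illustrative stand-in for KVM_COALESCED_MMIO_MAX */

int main(void)
{
	/* one slot is always left unused, so "full" means last + 1 == first */
	unsigned first = 0, last = RING_MAX - 1;
	unsigned avail = (first - last - 1) % RING_MAX;

	printf("avail = %u\n", avail);	/* prints 0: ring full, cannot batch */

	first = last = 5;		/* first == last means "empty" */
	printf("avail = %u\n", (first - last - 1) % RING_MAX);	/* prints 63 */
	return 0;
}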
-static void coalesced_mmio_write(struct kvm_io_device *this,
-				 gpa_t addr, int len, const void *val)
+static int coalesced_mmio_write(struct kvm_io_device *this,
+				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

+	if (!coalesced_mmio_in_range(dev, addr, len))
+		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* publish the entry before advancing 'last' */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
+	return 0;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
-	.in_range   = coalesced_mmio_in_range,
	.destructor = coalesced_mmio_destructor,
};
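With .in_range gone from the ops table, range checking becomes the device's own business: the bus layer simply offers the access to each device and treats -EOPNOTSUPP as "not mine". A minimal sketch of such a dispatch loop, assuming a bus that keeps its devices in a devs[] array and a kvm_iodevice_write() helper that forwards to ops->write and propagates its return value (illustrative, not the patched kvm_main.c):

/* Sketch: offer the write to each device until one claims it. */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
		     const void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;	/* a device handled the access */

	return -EOPNOTSUPP;		/* no device claimed it */
}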
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
+	int ret;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

-	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
-	return 0;
+	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+	if (ret < 0)
+		kfree(dev);
+
+	return ret;
}
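For context on the other end of the ring: userspace drains entries between 'first' and 'last', and the smp_wmb() in coalesced_mmio_write() guarantees an entry's contents are visible before the advanced 'last' index is. A hedged sketch of a consumer loop in the spirit of what a VMM does when it flushes the buffer; handle_mmio_write() and the userspace barrier helper are illustrative, not part of this patch:

/* Sketch of the userspace consumer draining the shared ring. */
void flush_coalesced_mmio(struct kvm_coalesced_mmio_ring *ring)
{
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		/* the kernel's smp_wmb() makes ent valid once 'last'
		 * is observed to have moved past this slot */
		handle_mmio_write(ent->phys_addr, ent->data, ent->len);

		smp_wmb();	/* order handling effects before slot reuse */
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}
}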
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,