return false;
vbr->req = req;
- if (blk_fs_request(vbr->req)) {
+ switch (req->cmd_type) {
+ case REQ_TYPE_FS:
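+ /* Plain filesystem read/write: pass the start sector and I/O
+ * priority straight through to the host */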
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- } else if (blk_pc_request(vbr->req)) {
+ break;
+ case REQ_TYPE_BLOCK_PC:
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- } else {
+ break;
+ case REQ_TYPE_LINUX_BLOCK:
+ if (req->cmd[0] == REQ_LB_OP_FLUSH) {
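+ /* A cache flush carries no data, so the sector field is unused */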
+ vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
+ }
+ /*FALLTHRU*/
+ default:
/* We don't put anything else in the queue. */
BUG();
}
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
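+ /* add_buf() now returns the remaining ring capacity on success, so
+ * only a negative return indicates failure (e.g. a full ring) */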
+ if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
vblk->vq->vq_ops->kick(vblk->vq);
}
+/* Return the device's raw ATA IDENTIFY data (HDIO_GET_IDENTITY helper) */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ void *opaque;
+ int err = -ENOMEM;
+
+ opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (!opaque)
+ goto out;
+
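+ /* Read the IDENTIFY page out of the device's config space; this
+ * fails if the host does not offer VIRTIO_BLK_F_IDENTIFY */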
+ err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+ offsetof(struct virtio_blk_config, identify), opaque,
+ VIRTIO_BLK_ID_BYTES);
+
+ if (err)
+ goto out_kfree;
+
+ if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+
+out_kfree:
+ kfree(opaque);
+out:
+ return err;
+}
+
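+/*
+ * Called by the block layer for the flush half of an ordered sequence;
+ * the request-type switch above then turns REQ_LB_OP_FLUSH into a
+ * VIRTIO_BLK_T_FLUSH command for the host.
+ */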
+static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
+{
+ req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ req->cmd[0] = REQ_LB_OP_FLUSH;
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ void __user *argp = (void __user *)data;
+
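+ /* Serve HDIO_GET_IDENTITY ourselves so tools such as hdparm can
+ * read the identify data the host exports */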
+ if (cmd == HDIO_GET_IDENTITY)
+ return virtblk_identify(disk, argp);
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOIOCTLCMD;
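+ /* ENOTTY is the error userspace expects for an unsupported
+ * ioctl; ENOIOCTLCMD is only meaningful inside the kernel */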
+ return -ENOTTY;
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
- (void __user *)data);
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
return 0;
}
-static struct block_device_operations virtblk_fops = {
+static const struct block_device_operations virtblk_fops = {
.locked_ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
return index << PART_BITS;
}
-static int virtblk_probe(struct virtio_device *vdev)
+static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
int err;
sg_init_table(vblk->sg, vblk->sg_elems);
/* We expect one virtqueue, for output. */
- vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
+ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
if (IS_ERR(vblk->vq)) {
err = PTR_ERR(vblk->vq);
goto out_free_vblk;
}
vblk->disk->queue->queuedata = vblk;
- queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
if (index < 26) {
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
index++;
- /* If barriers are supported, tell block layer that queue is ordered */
+ /* Tell the block layer the queue is ordered: prefer a real cache
+ * flush when the host supports one, else fall back to barriers */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+ blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ virtblk_prepare_flush);
+ else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
/* If disk is read-only in the host, the guest should obey */
blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+ /* No need to bounce any requests: the host can already access all of guest memory */
+ blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+
/* No real sector limit. */
blk_queue_max_sectors(vblk->disk->queue, -1U);
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
- blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+ blk_queue_logical_block_size(vblk->disk->queue, blk_size);
add_disk(vblk->disk);
return 0;
out_mempool:
mempool_destroy(vblk->pool);
out_free_vq:
- vdev->config->del_vq(vblk->vq);
+ vdev->config->del_vqs(vdev);
out_free_vblk:
kfree(vblk);
out:
return err;
}
-static void virtblk_remove(struct virtio_device *vdev)
+static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
- vdev->config->del_vq(vblk->vq);
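+ /* del_vqs() tears down every virtqueue set up at probe time */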
+ vdev->config->del_vqs(vdev);
kfree(vblk);
}
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_SCSI,
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH,
};
-static struct virtio_driver virtio_blk = {
+/*
+ * virtio_blk causes a spurious section mismatch warning by
+ * simultaneously referring to a __devinit and a __devexit function.
+ * Use __refdata to avoid this warning.
+ */
+static struct virtio_driver __refdata virtio_blk = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,