/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"
#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};
// This structure ties a struct request to the struct blkvsc_request /
// STORVSC_REQUEST entries that carry it; a single struct request may be
// represented by one or more struct blkvsc_request.
struct blkvsc_request_group {
	struct list_head blkvsc_req_list;	// list of blkvsc_requests
struct blkvsc_request {
	struct list_head req_entry;		// entry on blkvsc_request_group.blkvsc_req_list
	struct list_head pend_entry;		// entry on block_device_context.pending_list
	struct request *req;			// may be NULL for requests we generate internally
	struct block_device_context *dev;
	struct blkvsc_request_group *group;	// the group this request is part of; may be NULL

	wait_queue_head_t wevent;
	int cond;
	int write;

	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	STORVSC_REQUEST request;
	// !!! DO NOT ADD ANY FIELDS BELOW HERE !!!
	// The extension buffer is allocated immediately after this structure and
	// is pointed to by request.Extension; a field added here would overlap it.
};
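/*
 * Layout sketch (a reading of the kmem_cache_create() size used in
 * blkvsc_probe() and of the Extension assignment in blkvsc_submit_request();
 * widths are illustrative only):
 *
 *	+------------------------+---------------------------------+
 *	| struct blkvsc_request  | RequestExtSize extension bytes  |
 *	+------------------------+---------------------------------+
 *	                         ^-- request.Extension points here
 */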
// Per-device structure
struct block_device_context {
	struct device_context *device_ctx;	// points back to our device context
	struct kmem_cache *request_pool;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;
	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int media_not_present;
	unsigned int sector_size;
	unsigned char target;
struct blkvsc_driver_context {
	// !! drv_ctx and drv_obj must remain the first two fields, in this
	// order, so a struct driver_context pointer can be cast to a
	// struct blkvsc_driver_context (see blkvsc_probe() and blkvsc_remove()) !!
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct inode *inode, struct file *filep);
static int blkvsc_release(struct inode *inode, struct file *filep);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg);

static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(STORVSC_REQUEST *request);
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(STORVSC_REQUEST *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

// The one and only instance of this driver
static struct blkvsc_driver_context g_blkvsc_drv;

static struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
/*
 * Name:	blkvsc_drv_init()
 *
 * Desc:	BlkVsc driver initialization.
 */
int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	// Callback into the client driver to complete the initialization
	pfn_drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	// The driver belongs to vmbus
	vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;	// hand the first matching device back to the caller
	return 1;	// stop iterating
}
/*
 * Name:	blkvsc_drv_exit()
 */
void blkvsc_drv_exit(void)
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	struct device *current_dev = NULL;

	DPRINT_ENTER(BLKVSC_DRV);

	while (1) {
		// Find a child device still bound to this driver, if any
		current_dev = NULL;
		driver_for_each_device(&drv_ctx->driver, NULL, (void *)&current_dev, blkvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		// Initiate removal from the top-down
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);
/*
 * Name:	blkvsc_probe()
 *
 * Desc:	Add a new device for this driver.
 */
static int blkvsc_probe(struct device *device)
	int ret = 0;
	int major = 0;
	int devnum = 0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	STORVSC_DEVICE_INFO device_info;

	static int ide0_registered = 0;
	static int ide1_registered = 0;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd)
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);

	INIT_LIST_HEAD(&blkdev->pending_list);

	// Initialize what we can here
	spin_lock_init(&blkdev->lock);

	ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
				sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
				SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool)

	// Call into the vsc driver to add the device
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0)
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");

	blkdev->device_ctx = device_ctx;
	blkdev->target = device_info.TargetId;	// identifies the device: 0 or 1
	blkdev->path = device_info.PathId;	// identifies the IDE controller: 0 or 1

	dev_set_drvdata(device, blkdev);
	// Calculate the major and device number
	if (blkdev->path == 0)
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		// 0 or 1

		if (!ide0_registered)
			ret = register_blkdev(major, "ide");
			if (ret != 0)
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);

	else if (blkdev->path == 1)
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	// 2 or 3

		if (!ide1_registered)
			ret = register_blkdev(major, "ide");
			if (ret != 0)
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);

	else
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd)
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE - 1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
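	// Naming recap (a reading of the devnum/first_minor logic above): path 0
	// registers the ide0 major and yields devnum 0/1 (hda/hdb), path 1
	// registers the ide1 major and yields devnum 2/3 (hdc/hdd); the second
	// device on each controller starts at minor BLKVSC_MINORS, so every disk
	// gets BLKVSC_MINORS (64) minors for its partitions.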
	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE)
	{
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	}
	else
	{
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size / 512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d", blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev->request_pool)
	{
		kmem_cache_destroy(blkdev->request_pool);
		blkdev->request_pool = NULL;
	}

	DPRINT_EXIT(BLKVSC_DRV);
static void blkvsc_shutdown(struct device *device)
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs)
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
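// The command helpers below (blkvsc_do_flush, blkvsc_do_inquiry and the two
// read-capacity variants) all follow the same synchronous pattern: carve a
// blkvsc_request out of request_pool, build a CDB by hand, submit it with
// blkvsc_cmd_completion() as the completion routine, and then sleep on
// wevent until that completion sets ->cond.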
static int blkvsc_do_flush(struct block_device_context *blkdev)
	struct blkvsc_request *blkvsc_req = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	// Set this here since the completion routine may be invoked and
	// completed before we return
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
// Do a SCSI INQUIRY cmd here to get the device type (i.e. disk or dvd)
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	// EVPD bit: request a vital product data page
	blkvsc_req->cmnd[2] = 0x83;	// VPD page code 0x83 (device identification)
	blkvsc_req->cmnd[4] = 64;	// allocation length
	blkvsc_req->cmd_len = 6;

	// Set this here since the completion routine may be invoked and
	// completed before we return
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
552 DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
554 wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
556 buf = kmap(page_buf);
558 /* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */
560 device_type = buf[0] & 0x1F;
562 if (device_type == 0x0)
564 blkdev->device_type = HARDDISK_TYPE;
566 else if (device_type == 0x5)
568 blkdev->device_type = DVD_TYPE;
572 // TODO: this is currently unsupported device type
573 blkdev->device_type = UNKNOWN_DEV_TYPE;
576 DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type);
578 blkdev->device_id_len = buf[7];
579 if (blkdev->device_id_len > 64)
580 blkdev->device_id_len = 64;
582 memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
583 /* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
584 * blkdev->device_id_len); */
588 __free_page(page_buf);
590 kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
// Do a SCSI READ_CAPACITY cmd here to get the size of the disk
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0;	// assume a disk is present

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	// Set this here since the completion routine may be invoked and
	// completed before we return
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	if (blkvsc_req->request.Status)
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	// Medium not present
			blkdev->media_not_present = 1;

	buf = kmap(page_buf);

	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
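	// Note on the decode above: READ CAPACITY(10) returns two big-endian
	// 32-bit fields, the last readable LBA in bytes 0-3 (hence the "+ 1" to
	// turn it into a sector count) and the block length in bytes in bytes 4-7.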
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0;	// assume a disk is present

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E;	// READ_CAPACITY16
	blkvsc_req->cmd_len = 16;

	// Set this here since the completion routine may be invoked and
	// completed before we return
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	if (blkvsc_req->request.Status)
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	// Medium not present
			blkdev->media_not_present = 1;

	buf = kmap(page_buf);

	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	//blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
	//blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
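	// The 16-byte variant exists because READ CAPACITY(10) can only report a
	// 32-bit last LBA; READ CAPACITY(16) returns a big-endian 64-bit last LBA
	// in bytes 0-7 and the block length in bytes 8-11, which is what the
	// be64_to_cpu()/be32_to_cpu() conversions above decode.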
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
/*
 * Name:	blkvsc_remove()
 *
 * Desc:	Callback when our device is removed.
 */
static int blkvsc_remove(struct device *device)
	int ret;
	unsigned long flags;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
		DPRINT_EXIT(BLKVSC_DRV);

	// Call to the vsc driver to let it know that the device is being removed
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
		DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);

	// Get to a known state
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs)
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	DPRINT_EXIT(BLKVSC_DRV);
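/*
 * A reading of blkvsc_init_rw() below: the CDB size is picked from the
 * request geometry - READ/WRITE(16) when the start sector needs more than
 * 32 bits, READ/WRITE(10) when the count does not fit in one byte or the
 * start sector exceeds 0x1fffff (21 bits), and READ/WRITE(6) otherwise; for
 * the 10- and 16-byte forms the FUA bit is also set in CDB byte 1 when the
 * request has FUA set.
 */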
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT * 8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff)
		if (rq_data_dir(blkvsc_req->req))
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;

			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);

	else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff))
		if (rq_data_dir(blkvsc_req->req))
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;

			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);

		if (rq_data_dir(blkvsc_req->req))
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;

			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;

		*(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST *))
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
	int ret;

	STORVSC_REQUEST *storvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %llu count %ld offset %d len %d\n",
		   blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	/*for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
	{
		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
			   blkvsc_req, i,
			   blkvsc_req->request.DataBuffer.PfnArray[i]);
	}*/

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;		// this is not really used at all

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;
// We break the struct request into one or more blkvsc_requests and submit
// them. If we can't submit them all, we put the rest on the pending_list;
// the blkvsc_request() queue function works the pending_list later (see the
// sketch below).
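//
// Coalescing sketch (an illustration of the loop below, not part of the
// original code): page-sized, page-aligned bio segments are packed into a
// single blkvsc_request, one PFN per segment, until
// MAX_MULTIPAGE_BUFFER_COUNT pages are used; a segment with a non-zero
// bv_offset, or a previous segment shorter than PAGE_SIZE, forces the
// current blkvsc_request to be finished and a new one to be started.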
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;

	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;

	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;

	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu\n", blkdev, req, blk_rq_pos(req));

	// Create a group to tie req to list of blkvsc_reqs
	group = (struct blkvsc_request_group *)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	// foreach bio in the request
	for (bio = req->bio; bio; bio = bio->bi_next)
	{
		// Map this bio into an existing or new storvsc request
		bio_for_each_segment(bvec, bio, seg_idx)
		{
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
				   req, bio, bvec, seg_idx, databuf_idx);

			// Get a new storvsc request
			if ((!blkvsc_req) ||					// first time through
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||	// request is full
			    (bvec->bv_offset != 0) ||				// hole at the beginning of the page
			    (prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)))	// hole at the end of the previous page
			{
				// submit the previous one
				blkvsc_req->sector_start = start_sector;
				sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

				blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

				blkvsc_init_rw(blkvsc_req);

				// Create a new blkvsc_req to represent the current bvec
				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req)
				{
					// free up everything
					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
					{
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(blkdev->request_pool, blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0, sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
			}

			// Add the current bvec/segment to the current blkvsc_req
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			num_sectors += bvec->bv_len >> 9;
		}	// bio_for_each_segment
	}	// rq_for_each_bio

	// Handle the last one
	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);

	blkvsc_req->sector_start = start_sector;
	sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

	blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

	blkvsc_init_rw(blkvsc_req);

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
	{
		DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %llu sect_count %ld (%llu %ld)\n",
			   blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors);

		list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);

		ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);

		list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);

		DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %llu sect_count %ld (%llu %ld) ret %d\n",
			   blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors, ret);
static void blkvsc_cmd_completion(STORVSC_REQUEST *request)
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;

	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
static void blkvsc_request_completion(STORVSC_REQUEST *request)
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %llu sect_count %ld len %d group outstd %d total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	// Only start processing when all the blkvsc_reqs are completed. This
	// guarantees no out-of-order blkvsc_req completion when calling
	// __blk_end_request() below.
	if (blkvsc_req->group->outstanding == 0)
	{
		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
		{
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %ld\n",
				   comp_req,
				   comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size))
			{
				// All the sectors have been transferred, i.e. the request is done
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
				kmem_cache_free(blkdev->request_pool, comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down)
		{
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	// Flush the pending list first
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
	{
		// The pend_req could be part of a partially completed request. If
		// so, complete those requests first, until we hit the pend_req.
		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
		{
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %ld\n",
				   comp_req,
				   comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			ret = __blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size);

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (!__blk_end_request(pend_req->req,
			(!pend_req->request.Status ? 0 : -EIO),
			pend_req->sector_count * blkdev->sector_size))
		{
			// All the sectors have been transferred, i.e. the request is done
			DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
			kmem_cache_free(blkdev->request_pool, pend_req->group);
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	// Flush the pending list first
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
	{
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);

		ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}
static void blkvsc_request(struct request_queue *queue)
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL)
	{
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);
		if (ret != 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		}
		else if (ret < 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
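// A note on the dispatch loop above: requests are peeked and started one at a
// time; when blkvsc_do_request() cannot take more work the queue is stopped,
// and blkvsc_request_completion() later restarts it with blk_start_queue()
// and re-enters blkvsc_request() to drain whatever queued up in the meantime.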
static int blkvsc_open(struct inode *inode, struct file *filep)
	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE)
	{
		spin_unlock(&blkdev->lock);
		check_disk_change(inode->i_bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);

static int blkvsc_release(struct inode *inode, struct file *filep)
	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1)
	{
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
static int blkvsc_media_changed(struct gendisk *gd)
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;

static int blkvsc_revalidate_disk(struct gendisk *gd)
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE)
	{
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size / 512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
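/*
 * blkvsc_getgeo() below fabricates a CHS geometry for the virtual disk: the
 * capacity is capped at 65535 * 16 * 255 sectors, a sectors-per-track value
 * (255, 17, 31 or 63) and a head count are picked from size thresholds, and
 * cylinders are then derived as total_sectors / (heads * sectors_per_track)
 * using sector_div().
 */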
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int rem = 0;
	int cylinders = 0;

	if (total_sectors > (65535 * 16 * 255)) {
		total_sectors = (65535 * 16 * 255);
	}

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
	}

		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads

		temp = cylinder_times_heads + 1023;
		rem = sector_div(temp, 1024);	// sector_div stores the quotient in temp

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
		}

	temp = cylinder_times_heads;
	rem = sector_div(temp, heads);	// sector_div stores the quotient in temp
	cylinders = temp;

	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg)
	struct block_device *bd = inode->i_bdev;
	struct block_device_context *blkdev = bd->bd_disk->private_data;

	// TODO: HDIO_GET_IDENTITY expects the ATA IDENTIFY data layout
	// (struct hd_driveid) rather than just a GUID, so this case is
	// commented out for now.
	/*case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");

		if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
			ret = -EFAULT;
		break;*/
MODULE_LICENSE("GPL");

static int __init blkvsc_init(void)
	int ret;

	ASSERT(sizeof(sector_t) == 8);	// Make sure CONFIG_LBD is set

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;

static void __exit blkvsc_exit(void)
	DPRINT_ENTER(BLKVSC_DRV);

	blkvsc_drv_exit();

	DPRINT_EXIT(BLKVSC_DRV);

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
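// A possible addition (not in the original source): describe the module
// parameter so it shows up in modinfo.
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes) for the blkvsc channel");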
module_init(blkvsc_init);
module_exit(blkvsc_exit);