/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * - Should this get merged, block/scsi_ioctl.c will be migrated into
 *   this file. To keep maintenance down, it's easier to have them
 *   separated right now.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
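
/*
 * Illustrative sketch (not part of the original source): how a user-space
 * program might drive this interface synchronously.  It fills a v4 header
 * the way bsg_validate_sgv4_hdr() expects (guard 'Q', SCSI protocol and
 * subprotocol, CDB length within BLK_MAX_CDB) and issues the SG_IO ioctl
 * handled in bsg_ioctl() below.  The device path and the TEST UNIT READY
 * CDB are assumptions chosen only for the example.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>		// struct sg_io_v4, BSG_PROTOCOL_SCSI
 *	#include <scsi/sg.h>		// SG_IO
 *
 *	int main(void)
 *	{
 *		unsigned char cdb[6] = { 0 };	// TEST UNIT READY
 *		unsigned char sense[32];
 *		struct sg_io_v4 hdr;
 *		int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	// assumed path
 *
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.guard = 'Q';
 *		hdr.protocol = BSG_PROTOCOL_SCSI;
 *		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *		hdr.request = (unsigned long) cdb;
 *		hdr.request_len = sizeof(cdb);
 *		hdr.response = (unsigned long) sense;
 *		hdr.max_response_len = sizeof(sense);
 *		hdr.timeout = 30000;	// milliseconds, see blk_fill_sgv4_hdr_rq()
 *
 *		if (fd < 0 || ioctl(fd, SG_IO, &hdr) < 0)
 *			perror("bsg SG_IO");
 *		return 0;
 *	}
 */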

static const char bsg_version[] = "block layer sg (bsg) 0.4";

struct bsg_device {
	request_queue_t *queue;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];

static void bsg_free_command(struct bsg_command *bc)
	struct bsg_device *bd = bc->bd;
	kmem_cache_free(bsg_cmd_cachep, bc);
	spin_lock_irqsave(&bd->lock, flags);
	spin_unlock_irqrestore(&bd->lock, flags);
	wake_up(&bd->wq_free);

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
	struct bsg_command *bc = ERR_PTR(-EINVAL);
	spin_lock_irq(&bd->lock);
	if (bd->queued_cmds >= bd->max_queue)
	spin_unlock_irq(&bd->lock);
	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	spin_lock_irq(&bd->lock);
	bc = ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	spin_unlock_irq(&bd->lock);

static inline struct hlist_head *bsg_dev_idx_hash(int index)
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];

static int bsg_io_schedule(struct bsg_device *bd)
	spin_lock_irq(&bd->lock);
	BUG_ON(bd->done_cmds > bd->queued_cmds);
	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	finish_wait(&bd->wq_done, &wait);
	spin_unlock_irq(&bd->lock);

static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;
	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EINVAL;
	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
	*rw = hdr->dout_xfer_len ? WRITE : READ;

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
	request_queue_t *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	unsigned int dxfer_len;
	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);
	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		rq->next_rq = next_rq;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
	blk_rq_unmap_user(next_rq->bio);
	blk_put_request(next_rq);

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);
	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	spin_unlock_irqrestore(&bd->lock, flags);
	wake_up(&bd->wq_done);

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
	rq->sense = bc->sense;
	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);
	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
	struct bsg_command *bc = NULL;
	spin_lock_irq(&bd->lock);
		bc = list_entry(bd->done_list.next, struct bsg_command, list);
	spin_unlock_irq(&bd->lock);

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
	struct bsg_command *bc;
	bc = bsg_next_done_cmd(bd);
	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		bc = ERR_PTR(-EAGAIN);
	ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
	if (ret) {
		bc = ERR_PTR(-ERESTARTSYS);
	dprintk("%s: returning done %p\n", bd->name, bc);

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;
	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);
		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
	blk_rq_unmap_user(bidi_bio);
	blk_put_request(rq->next_rq);
	blk_rq_unmap_user(bio);

static int bsg_complete_all_commands(struct bsg_device *bd)
	struct bsg_command *bc;
	dprintk("%s: entered\n", bd->name);
	set_bit(BSG_F_BLOCK, &bd->flags);
	/*
	 * wait for all commands to complete
	 */
	ret = bsg_io_schedule(bd);
	/*
	 * look for -ENODATA specifically -- we'll sometimes get
	 * -ERESTARTSYS when we've taken a signal, but we can't
	 * return until we're done freeing the queue, so ignore
	 * it.  The signal will get handled when we're done freeing
	 * the queue.
	 */
	} while (ret != -ENODATA);
	/*
	 * discard done commands
	 */
	spin_lock_irq(&bd->lock);
	if (!bd->queued_cmds) {
		spin_unlock_irq(&bd->lock);
	spin_unlock_irq(&bd->lock);
	bc = bsg_get_done_cmd(bd);
	tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					bc->bidi_bio);
	bsg_free_command(bc);

static ssize_t
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
	struct bsg_command *bc;
	int nr_commands, ret;
	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);
		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
		bsg_free_command(bc);
		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	struct bsg_device *bd = file->private_data;
	dprintk("%s: read %Zd bytes\n", bd->name, count);
	bsg_set_block(bd, file);
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	if (!bytes_read || (bytes_read && err_block_err(ret)))

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
	struct bsg_command *bc;
	int ret, nr_commands;
	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		request_queue_t *q = bd->queue;
		bc = bsg_alloc_command(bd);
		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		bsg_add_command(bd, q, bc, rq);
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	bsg_free_command(bc);

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	dprintk("%s: write %Zd bytes\n", bd->name, count);
	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;
	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;
	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
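
/*
 * Illustrative sketch (not part of the original source): besides the
 * synchronous SG_IO path, __bsg_write()/__bsg_read() above give an
 * asynchronous model -- write() queues one or more struct sg_io_v4
 * headers and read() later reaps the completed ones, with SG_SET_COMMAND_Q
 * (see bsg_ioctl() below) bounding how many may be outstanding.  The fd is
 * assumed to be an open /dev/bsg node as in the earlier example.
 *
 *	struct sg_io_v4 hdr;
 *
 *	// fill hdr as in the SG_IO example above, then:
 *	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *		perror("bsg submit");
 *	// ... do other work while the command is in flight ...
 *	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))	// blocks unless O_NONBLOCK
 *		perror("bsg reap");
 */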

static struct bsg_device *bsg_alloc_device(void)
	struct bsg_device *bd;
	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	spin_lock_init(&bd->lock);
	bd->max_queue = BSG_DEFAULT_CMDS;
	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);
	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);

static int bsg_put_device(struct bsg_device *bd)
	mutex_lock(&bsg_mutex);
	if (!atomic_dec_and_test(&bd->ref_count))
	dprintk("%s: tearing down\n", bd->name);
	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);
	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);
	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
	struct bsg_device *bd;
	unsigned char buf[32];
	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);
	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);
	mutex_unlock(&bsg_mutex);

static struct bsg_device *__bsg_get_device(int minor)
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;
	mutex_lock(&bsg_mutex);
	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
	mutex_unlock(&bsg_mutex);

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;
	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
	mutex_unlock(&bsg_mutex);
	if (!bcd)
		return ERR_PTR(-ENODEV);
	return bsg_add_device(inode, bcd->queue, file);

static int bsg_open(struct inode *inode, struct file *file)
	struct bsg_device *bd = bsg_get_device(inode, file);
	file->private_data = bd;

static int bsg_release(struct inode *inode, struct file *file)
	struct bsg_device *bd = file->private_data;
	file->private_data = NULL;
	return bsg_put_device(bd);

static unsigned int bsg_poll(struct file *file, poll_table *wait)
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;
	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);
	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		if (get_user(queue, uarg))
			return -EFAULT;
		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	case SG_IO: {
		struct bio *bio, *bidi_bio = NULL;
		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;
		rq = bsg_map_hdr(bd, &hdr);
		bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;
	/*
	 * block device ioctls
	 */
	return ioctl_by_bdev(bd->bdev, cmd, arg);

static struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
	struct bsg_class_device *bcd = &q->bsg_dev;
	WARN_ON(!bcd->class_dev);
	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	mutex_unlock(&bsg_mutex);

EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, const char *name)
	struct bsg_class_device *bcd, *__bcd;
	struct class_device *class_dev = NULL;
	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);
	mutex_lock(&bsg_mutex);
	if (bsg_device_nr == BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == bsg_minor_idx) {
			if (bsg_minor_idx == BSG_MAX_DEVS)
	bcd->minor = bsg_minor_idx++;
	if (bsg_minor_idx == BSG_MAX_DEVS)
		bsg_minor_idx = 0;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
	bcd->class_dev = class_dev;
	ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
	list_add_tail(&bcd->list, &bsg_class_list);
	mutex_unlock(&bsg_mutex);
	class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
	mutex_unlock(&bsg_mutex);

EXPORT_SYMBOL_GPL(bsg_register_queue);
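
/*
 * Illustrative sketch (assumption, not from this file): a driver or
 * transport class that wants a bsg node for one of its request queues
 * would pair the two exported calls above, e.g. from its probe and remove
 * paths (sdev here stands for a struct scsi_device the caller already has):
 *
 *	if (bsg_register_queue(sdev->request_queue,
 *			       kobject_name(&sdev->sdev_gendev.kobj)))
 *		printk(KERN_WARNING "bsg: registration failed\n");
 *	...
 *	bsg_unregister_queue(sdev->request_queue);
 *
 * The bsg_add()/bsg_remove() class interface hooks below do exactly this
 * for every SCSI device, so SCSI LLDs get their nodes without any explicit
 * calls of their own.
 */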

static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
	struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
	struct request_queue *rq = sdp->request_queue;
	if (rq->kobj.parent)
		ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
	else
		ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));

static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
	bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);

static struct class_interface bsg_intf = {
	.add	= bsg_add,
	.remove	= bsg_remove,
};

static struct cdev bsg_cdev = {
	.kobj	= {.name = "bsg", },
	.owner	= THIS_MODULE,
};

static int __init bsg_init(void)
	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);
	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;
	bsg_major = MAJOR(devid);
	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;
	ret = scsi_register_interface(&bsg_intf);
	printk(KERN_INFO "%s loaded (major %d)\n", bsg_version, bsg_major);
	return 0;
	printk(KERN_ERR "bsg: failed to register scsi interface %d\n", ret);
	cdev_del(&bsg_cdev);
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (bsg) driver");
MODULE_LICENSE("GPL");

device_initcall(bsg_init);