[SCSI] fix memory leak in initialization
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a272b9a..a67fed1 100644
 #include <linux/netlink.h>
 #include <net/netlink.h>
 #include <scsi/scsi_netlink_fc.h>
+#include <scsi/scsi_bsg_fc.h>
 #include "scsi_priv.h"
 #include "scsi_transport_fc_internal.h"
 
 static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
 static void fc_vport_sched_delete(struct work_struct *work);
-
-/*
- * This is a temporary carrier for creating a vport. It will eventually
- * be replaced  by a real message definition for sgio or netlink.
- *
- * fc_vport_identifiers: This set of data contains all elements
- * to uniquely identify and instantiate a FC virtual port.
- *
- * Notes:
- *   symbolic_name: The driver is to append the symbolic_name string data
- *      to the symbolic_node_name data that it generates by default.
- *      the resulting combination should then be registered with the switch.
- *      It is expected that things like Xen may stuff a VM title into
- *      this field.
- */
-struct fc_vport_identifiers {
-       u64 node_name;
-       u64 port_name;
-       u32 roles;
-       bool disable;
-       enum fc_port_type vport_type;   /* only FC_PORTTYPE_NPIV allowed */
-       char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
-};
-
-static int fc_vport_create(struct Scsi_Host *shost, int channel,
+static int fc_vport_setup(struct Scsi_Host *shost, int channel,
        struct device *pdev, struct fc_vport_identifiers  *ids,
        struct fc_vport **vport);
+static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
+static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
+static void fc_bsg_remove(struct request_queue *);
+static void fc_bsg_goose_queue(struct fc_rport *);
 
 /*
  * Redefine so that we can have same named attributes in the
@@ -119,7 +100,7 @@ static struct {
        { FC_PORTTYPE_NPORT,    "NPort (fabric via point-to-point)" },
        { FC_PORTTYPE_NLPORT,   "NLPort (fabric via loop)" },
        { FC_PORTTYPE_LPORT,    "LPort (private loop)" },
-       { FC_PORTTYPE_PTP,      "Point-To-Point (direct nport connection" },
+       { FC_PORTTYPE_PTP,      "Point-To-Point (direct nport connection)" },
        { FC_PORTTYPE_NPIV,             "NPIV VPORT" },
 };
 fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
@@ -310,7 +291,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
 #define FC_STARGET_NUM_ATTRS   3
 #define FC_RPORT_NUM_ATTRS     10
 #define FC_VPORT_NUM_ATTRS     9
-#define FC_HOST_NUM_ATTRS      21
+#define FC_HOST_NUM_ATTRS      22
 
 struct fc_internal {
        struct scsi_transport_template t;
@@ -435,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
                return -ENOMEM;
        }
 
+       fc_bsg_hostadd(shost, fc_host);
+       /* ignore any bsg add error - we just can't do sgio */
+
+       return 0;
+}
+
+static int fc_host_remove(struct transport_container *tc, struct device *dev,
+                        struct device *cdev)
+{
+       struct Scsi_Host *shost = dev_to_shost(dev);
+       struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+
+       fc_bsg_remove(fc_host->rqst_q);
        return 0;
 }
 
 static DECLARE_TRANSPORT_CLASS(fc_host_class,
                               "fc_host",
                               fc_host_setup,
-                              NULL,
+                              fc_host_remove,
                               NULL);
 
 /*
@@ -557,12 +551,8 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
        event->event_code = event_code;
        event->event_data = event_data;
 
-       err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
-                             GFP_KERNEL);
-       if (err && (err != -ESRCH))     /* filter no recipient errors */
-               /* nlmsg_multicast already kfree_skb'd */
-               goto send_fail;
-
+       nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
+                       GFP_KERNEL);
        return;
 
 send_fail_skb:
@@ -571,7 +561,7 @@ send_fail:
        name = get_fc_host_event_code_name(event_code);
        printk(KERN_WARNING
                "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
-               __FUNCTION__, shost->host_no,
+               __func__, shost->host_no,
                (name) ? name : "<unknown>", event_data, err);
        return;
 }
@@ -631,12 +621,8 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
        event->event_code = FCH_EVT_VENDOR_UNIQUE;
        memcpy(&event->event_data, data_buf, data_len);
 
-       err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
-                             GFP_KERNEL);
-       if (err && (err != -ESRCH))     /* filter no recipient errors */
-               /* nlmsg_multicast already kfree_skb'd */
-               goto send_vendor_fail;
-
+       nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
+                       GFP_KERNEL);
        return;
 
 send_vendor_fail_skb:
@@ -644,7 +630,7 @@ send_vendor_fail_skb:
 send_vendor_fail:
        printk(KERN_WARNING
                "%s: Dropped Event : host %d vendor_unique - err %d\n",
-               __FUNCTION__, shost->host_no, err);
+               __func__, shost->host_no, err);
        return;
 }
 EXPORT_SYMBOL(fc_host_post_vendor_event);
@@ -1760,7 +1746,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
        vid.disable = false;            /* always enabled */
 
        /* we only allow support on Channel 0 !!! */
-       stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
+       stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
        return stat ? stat : count;
 }
 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1936,15 @@ static int fc_vport_match(struct attribute_container *cont,
  * Notes:
  *     This routine assumes no locks are held on entry.
  */
-static enum scsi_eh_timer_return
+static enum blk_eh_timer_return
 fc_timed_out(struct scsi_cmnd *scmd)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
 
        if (rport->port_state == FC_PORTSTATE_BLOCKED)
-               return EH_RESET_TIMER;
+               return BLK_EH_RESET_TIMER;
 
-       return EH_NOT_HANDLED;
+       return BLK_EH_NOT_HANDLED;
 }
 
 /*
@@ -2157,8 +2143,7 @@ fc_attach_transport(struct fc_function_template *ft)
        SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
        SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
        SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
-       if (ft->terminate_rport_io)
-               SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
+       SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
 
        BUG_ON(count > FC_RPORT_NUM_ATTRS);
 
@@ -2352,6 +2337,22 @@ fc_remove_host(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(fc_remove_host);
 
+static void fc_terminate_rport_io(struct fc_rport *rport)
+{
+       struct Scsi_Host *shost = rport_to_shost(rport);
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+
+       /* Involve the LLDD if possible to terminate all io on the rport. */
+       if (i->f->terminate_rport_io)
+               i->f->terminate_rport_io(rport);
+
+       /*
+        * must unblock to flush queued IO. The caller will have set
+        * the port_state or flags, so that fc_remote_port_chkready will
+        * fail IO.
+        */
+       scsi_target_unblock(&rport->dev);
+}
 
 /**
  * fc_starget_delete - called to delete the scsi descendants of an rport
@@ -2364,13 +2365,8 @@ fc_starget_delete(struct work_struct *work)
 {
        struct fc_rport *rport =
                container_of(work, struct fc_rport, stgt_delete_work);
-       struct Scsi_Host *shost = rport_to_shost(rport);
-       struct fc_internal *i = to_fc_internal(shost->transportt);
-
-       /* Involve the LLDD if possible to terminate all io on the rport. */
-       if (i->f->terminate_rport_io)
-               i->f->terminate_rport_io(rport);
 
+       fc_terminate_rport_io(rport);
        scsi_remove_target(&rport->dev);
 }
 
@@ -2396,9 +2392,7 @@ fc_rport_final_delete(struct work_struct *work)
        if (rport->flags & FC_RPORT_SCAN_PENDING)
                scsi_flush_work(shost);
 
-       /* involve the LLDD to terminate all pending i/o */
-       if (i->f->terminate_rport_io)
-               i->f->terminate_rport_io(rport);
+       fc_terminate_rport_io(rport);
 
        /*
         * Cancel any outstanding timers. These should really exist
@@ -2424,10 +2418,16 @@ fc_rport_final_delete(struct work_struct *work)
        /*
         * Notify the driver that the rport is now dead. The LLDD will
         * also guarantee that any communication to the rport is terminated
+        *
+        * Avoid this call if we already called it when we preserved the
+        * rport for the binding.
         */
-       if (i->f->dev_loss_tmo_callbk)
+       if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
+           (i->f->dev_loss_tmo_callbk))
                i->f->dev_loss_tmo_callbk(rport);
 
+       fc_bsg_remove(rport->rqst_q);
+
        transport_remove_device(dev);
        device_del(dev);
        transport_destroy_device(dev);
@@ -2464,7 +2464,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
        size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
        rport = kzalloc(size, GFP_KERNEL);
        if (unlikely(!rport)) {
-               printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+               printk(KERN_ERR "%s: allocation failure\n", __func__);
                return NULL;
        }
 
@@ -2503,8 +2503,8 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
        device_initialize(dev);                 /* takes self reference */
        dev->parent = get_device(&shost->shost_gendev); /* parent reference */
        dev->release = fc_rport_dev_release;
-       sprintf(dev->bus_id, "rport-%d:%d-%d",
-               shost->host_no, channel, rport->number);
+       dev_set_name(dev, "rport-%d:%d-%d",
+                    shost->host_no, channel, rport->number);
        transport_setup_device(dev);
 
        error = device_add(dev);
@@ -2515,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
        transport_add_device(dev);
        transport_configure_device(dev);
 
+       fc_bsg_rportadd(shost, rport);
+       /* ignore any bsg add error - we just can't do sgio */
+
        if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
                /* initiate a scan of the target */
                rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2663,7 +2666,9 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
 
                                spin_lock_irqsave(shost->host_lock, flags);
 
-                               rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+                               rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+                                                 FC_RPORT_DEVLOSS_PENDING |
+                                                 FC_RPORT_DEVLOSS_CALLBK_DONE);
 
                                /* if target, initiate a scan */
                                if (rport->scsi_target_id != -1) {
@@ -2677,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
                                        spin_unlock_irqrestore(shost->host_lock,
                                                        flags);
 
+                               fc_bsg_goose_queue(rport);
+
                                return rport;
                        }
                }
@@ -2726,6 +2733,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
                        rport->port_id = ids->port_id;
                        rport->roles = ids->roles;
                        rport->port_state = FC_PORTSTATE_ONLINE;
+                       rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
 
                        if (fci->f->dd_fcrport_size)
                                memset(rport->dd_data, 0,
@@ -2808,7 +2816,6 @@ void
 fc_remote_port_delete(struct fc_rport  *rport)
 {
        struct Scsi_Host *shost = rport_to_shost(rport);
-       struct fc_internal *i = to_fc_internal(shost->transportt);
        int timeout = rport->dev_loss_tmo;
        unsigned long flags;
 
@@ -2854,7 +2861,7 @@ fc_remote_port_delete(struct fc_rport  *rport)
 
        /* see if we need to kill io faster than waiting for device loss */
        if ((rport->fast_io_fail_tmo != -1) &&
-           (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
+           (rport->fast_io_fail_tmo < timeout))
                fc_queue_devloss_work(shost, &rport->fail_io_work,
                                        rport->fast_io_fail_tmo * HZ);
 
@@ -2930,7 +2937,8 @@ fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
                        fc_flush_devloss(shost);
 
                spin_lock_irqsave(shost->host_lock, flags);
-               rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+               rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+                                 FC_RPORT_DEVLOSS_PENDING);
                spin_unlock_irqrestore(shost->host_lock, flags);
 
                /* ensure any stgt delete functions are done */
@@ -2959,6 +2967,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
        struct fc_rport *rport =
                container_of(work, struct fc_rport, dev_loss_work.work);
        struct Scsi_Host *shost = rport_to_shost(rport);
+       struct fc_internal *i = to_fc_internal(shost->transportt);
        struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
        unsigned long flags;
 
@@ -3025,6 +3034,18 @@ fc_timeout_deleted_rport(struct work_struct *work)
        rport->supported_classes = FC_COS_UNSPECIFIED;
        rport->roles = FC_PORT_ROLE_UNKNOWN;
        rport->port_state = FC_PORTSTATE_NOTPRESENT;
+       rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+       rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
+
+       /*
+        * Pre-emptively kill I/O rather than waiting for the work queue
+        * item to tear down the starget. (FCoE libFC folks prefer this
+        * and to have the rport_port_id still set when it's done).
+        */
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       fc_terminate_rport_io(rport);
+
+       BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
 
 /* remove the identifiers that aren't used in the consistent binding */
        switch (fc_host->tgtid_bind_type) {
@@ -3049,12 +3070,19 @@ fc_timeout_deleted_rport(struct work_struct *work)
         * went away and didn't come back - we'll remove
         * all attached scsi devices.
         */
-       spin_unlock_irqrestore(shost->host_lock, flags);
-
-       scsi_target_unblock(&rport->dev);
        fc_queue_work(shost, &rport->stgt_delete_work);
+
+       /*
+        * Notify the driver that the rport is now dead. The LLDD will
+        * also guarantee that any communication to the rport is terminated
+        *
+        * Note: we set the CALLBK_DONE flag above to correspond
+        */
+       if (i->f->dev_loss_tmo_callbk)
+               i->f->dev_loss_tmo_callbk(rport);
 }
 
+
 /**
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
  * @work:      rport to terminate io on.
@@ -3067,13 +3095,12 @@ fc_timeout_fail_rport_io(struct work_struct *work)
 {
        struct fc_rport *rport =
                container_of(work, struct fc_rport, fail_io_work.work);
-       struct Scsi_Host *shost = rport_to_shost(rport);
-       struct fc_internal *i = to_fc_internal(shost->transportt);
 
        if (rport->port_state != FC_PORTSTATE_BLOCKED)
                return;
 
-       i->f->terminate_rport_io(rport);
+       rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
+       fc_terminate_rport_io(rport);
 }
 
 /**
@@ -3103,7 +3130,7 @@ fc_scsi_scan_rport(struct work_struct *work)
 
 
 /**
- * fc_vport_create - allocates and creates a FC virtual port.
+ * fc_vport_setup - allocates and creates a FC virtual port.
  * @shost:     scsi host the virtual port is connected to.
  * @channel:   Channel on shost port connected to.
  * @pdev:      parent device for vport
@@ -3118,7 +3145,7 @@ fc_scsi_scan_rport(struct work_struct *work)
  *     This routine assumes no locks are held on entry.
  */
 static int
-fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
+fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
        struct fc_vport_identifiers  *ids, struct fc_vport **ret_vport)
 {
        struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3137,7 +3164,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
        size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
        vport = kzalloc(size, GFP_KERNEL);
        if (unlikely(!vport)) {
-               printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+               printk(KERN_ERR "%s: allocation failure\n", __func__);
                return -ENOMEM;
        }
 
@@ -3172,8 +3199,8 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
        device_initialize(dev);                 /* takes self reference */
        dev->parent = get_device(pdev);         /* takes parent reference */
        dev->release = fc_vport_dev_release;
-       sprintf(dev->bus_id, "vport-%d:%d-%d",
-               shost->host_no, channel, vport->number);
+       dev_set_name(dev, "vport-%d:%d-%d",
+                    shost->host_no, channel, vport->number);
        transport_setup_device(dev);
 
        error = device_add(dev);
@@ -3196,19 +3223,19 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
         */
        if (pdev != &shost->shost_gendev) {
                error = sysfs_create_link(&shost->shost_gendev.kobj,
-                                &dev->kobj, dev->bus_id);
+                                &dev->kobj, dev_name(dev));
                if (error)
                        printk(KERN_ERR
                                "%s: Cannot create vport symlinks for "
                                "%s, err=%d\n",
-                               __FUNCTION__, dev->bus_id, error);
+                               __func__, dev_name(dev), error);
        }
        spin_lock_irqsave(shost->host_lock, flags);
        vport->flags &= ~FC_VPORT_CREATING;
        spin_unlock_irqrestore(shost->host_lock, flags);
 
        dev_printk(KERN_NOTICE, pdev,
-                       "%s created via shost%d channel %d\n", dev->bus_id,
+                       "%s created via shost%d channel %d\n", dev_name(dev),
                        shost->host_no, channel);
 
        *ret_vport = vport;
@@ -3231,6 +3258,28 @@ delete_vport:
        return error;
 }
 
+/**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost:     scsi host the virtual port is connected to.
+ * @channel:   channel on shost port connected to.
+ * @ids:       The world wide names, FC4 port roles, etc for
+ *              the virtual port.
+ *
+ * Notes:
+ *     This routine assumes no locks are held on entry.
+ */
+struct fc_vport *
+fc_vport_create(struct Scsi_Host *shost, int channel,
+       struct fc_vport_identifiers *ids)
+{
+       int stat;
+       struct fc_vport *vport;
+
+       stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
+                ids, &vport);
+       return stat ? NULL : vport;
+}
+EXPORT_SYMBOL(fc_vport_create);
 
 /**
  * fc_vport_terminate - Admin App or LLDD requests termination of a vport
@@ -3283,7 +3332,7 @@ fc_vport_terminate(struct fc_vport *vport)
                return stat;
 
        if (dev->parent != &shost->shost_gendev)
-               sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
+               sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
        transport_remove_device(dev);
        device_del(dev);
        transport_destroy_device(dev);
@@ -3314,12 +3363,605 @@ fc_vport_sched_delete(struct work_struct *work)
        if (stat)
                dev_printk(KERN_ERR, vport->dev.parent,
                        "%s: %s could not be deleted created via "
-                       "shost%d channel %d - error %d\n", __FUNCTION__,
-                       vport->dev.bus_id, vport->shost->host_no,
+                       "shost%d channel %d - error %d\n", __func__,
+                       dev_name(&vport->dev), vport->shost->host_no,
                        vport->channel, stat);
 }
 
 
+/*
+ * BSG support
+ */
+
+
+/**
+ * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
+ * @job:       fc_bsg_job that is to be torn down
+ */
+static void
+fc_destroy_bsgjob(struct fc_bsg_job *job)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&job->job_lock, flags);
+       if (job->ref_cnt) {
+               spin_unlock_irqrestore(&job->job_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&job->job_lock, flags);
+
+       put_device(job->dev);   /* release reference for the request */
+
+       kfree(job->request_payload.sg_list);
+       kfree(job->reply_payload.sg_list);
+       kfree(job);
+}
+
+/**
+ * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
+ *                  completed
+ * @job:       fc_bsg_job that is complete
+ */
+static void
+fc_bsg_jobdone(struct fc_bsg_job *job)
+{
+       struct request *req = job->req;
+       struct request *rsp = req->next_rq;
+       int err;
+
+       err = job->req->errors = job->reply->result;
+
+       if (err < 0)
+               /* we're only returning the result field in the reply */
+               job->req->sense_len = sizeof(uint32_t);
+       else
+               job->req->sense_len = job->reply_len;
+
+       /* we assume all request payload was transferred, residual == 0 */
+       req->resid_len = 0;
+
+       if (rsp) {
+               WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
+
+               /* set reply (bidi) residual */
+               rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
+                                     rsp->resid_len);
+       }
+       blk_complete_request(req);
+}
+
+/**
+ * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @rq:        BSG request that holds the job to be destroyed
+ */
+static void fc_bsg_softirq_done(struct request *rq)
+{
+       struct fc_bsg_job *job = rq->special;
+       unsigned long flags;
+
+       spin_lock_irqsave(&job->job_lock, flags);
+       job->state_flags |= FC_RQST_STATE_DONE;
+       job->ref_cnt--;
+       spin_unlock_irqrestore(&job->job_lock, flags);
+
+       blk_end_request_all(rq, rq->errors);
+       fc_destroy_bsgjob(job);
+}
+
+/**
+ * fc_bsg_job_timeout - handler for when a bsg request times out
+ * @req:       request that timed out
+ */
+static enum blk_eh_timer_return
+fc_bsg_job_timeout(struct request *req)
+{
+       struct fc_bsg_job *job = (void *) req->special;
+       struct Scsi_Host *shost = job->shost;
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       unsigned long flags;
+       int err = 0, done = 0;
+
+       if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+               return BLK_EH_RESET_TIMER;
+
+       spin_lock_irqsave(&job->job_lock, flags);
+       if (job->state_flags & FC_RQST_STATE_DONE)
+               done = 1;
+       else
+               job->ref_cnt++;
+       spin_unlock_irqrestore(&job->job_lock, flags);
+
+       if (!done && i->f->bsg_timeout) {
+               /* call LLDD to abort the i/o as it has timed out */
+               err = i->f->bsg_timeout(job);
+               if (err)
+                       printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
+                               "abort failed with status %d\n", err);
+       }
+
+       /* the blk_end_sync_io() doesn't check the error */
+       if (done)
+               return BLK_EH_NOT_HANDLED;
+       else
+               return BLK_EH_HANDLED;
+}
+
+static int
+fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
+{
+       size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
+
+       BUG_ON(!req->nr_phys_segments);
+
+       buf->sg_list = kzalloc(sz, GFP_KERNEL);
+       if (!buf->sg_list)
+               return -ENOMEM;
+       sg_init_table(buf->sg_list, req->nr_phys_segments);
+       buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+       buf->payload_len = blk_rq_bytes(req);
+       return 0;
+}
+
+
+/**
+ * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
+ *                   bsg request
+ * @shost:     SCSI Host corresponding to the bsg object
+ * @rport:     (optional) FC Remote Port corresponding to the bsg object
+ * @req:       BSG request that needs a job structure
+ */
+static int
+fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
+       struct request *req)
+{
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       struct request *rsp = req->next_rq;
+       struct fc_bsg_job *job;
+       int ret;
+
+       BUG_ON(req->special);
+
+       job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
+                       GFP_KERNEL);
+       if (!job)
+               return -ENOMEM;
+
+       /*
+        * Note: this is a bit silly.
+        * The request gets formatted as a SGIO v4 ioctl request, which
+        * then gets reformatted as a blk request, which then gets
+        * reformatted as a fc bsg request. And on completion, we have
+        * to wrap return results such that SGIO v4 thinks it was a scsi
+        * status.  I hope this was all worth it.
+        */
+
+       req->special = job;
+       job->shost = shost;
+       job->rport = rport;
+       job->req = req;
+       if (i->f->dd_bsg_size)
+               job->dd_data = (void *)&job[1];
+       spin_lock_init(&job->job_lock);
+       job->request = (struct fc_bsg_request *)req->cmd;
+       job->request_len = req->cmd_len;
+       job->reply = req->sense;
+       job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
+                                                * allocated */
+       if (req->bio) {
+               ret = fc_bsg_map_buffer(&job->request_payload, req);
+               if (ret)
+                       goto failjob_rls_job;
+       }
+       if (rsp && rsp->bio) {
+               ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
+               if (ret)
+                       goto failjob_rls_rqst_payload;
+       }
+       job->job_done = fc_bsg_jobdone;
+       if (rport)
+               job->dev = &rport->dev;
+       else
+               job->dev = &shost->shost_gendev;
+       get_device(job->dev);           /* take a reference for the request */
+
+       job->ref_cnt = 1;
+
+       return 0;
+
+
+failjob_rls_rqst_payload:
+       kfree(job->request_payload.sg_list);
+failjob_rls_job:
+       kfree(job);
+       return -ENOMEM;
+}
+
+
+enum fc_dispatch_result {
+       FC_DISPATCH_BREAK,      /* on return, q is locked, break from q loop */
+       FC_DISPATCH_LOCKED,     /* on return, q is locked, continue on */
+       FC_DISPATCH_UNLOCKED,   /* on return, q is unlocked, continue on */
+};
+
+
+/**
+ * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
+ * @q:         fc host request queue
+ * @shost:     scsi host rport attached to
+ * @job:       bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+                        struct fc_bsg_job *job)
+{
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       int cmdlen = sizeof(uint32_t);  /* start with length of msgcode */
+       int ret;
+
+       /* Validate the host command */
+       switch (job->request->msgcode) {
+       case FC_BSG_HST_ADD_RPORT:
+               cmdlen += sizeof(struct fc_bsg_host_add_rport);
+               break;
+
+       case FC_BSG_HST_DEL_RPORT:
+               cmdlen += sizeof(struct fc_bsg_host_del_rport);
+               break;
+
+       case FC_BSG_HST_ELS_NOLOGIN:
+               cmdlen += sizeof(struct fc_bsg_host_els);
+               /* there better be xmt and rcv payloads */
+               if ((!job->request_payload.payload_len) ||
+                   (!job->reply_payload.payload_len)) {
+                       ret = -EINVAL;
+                       goto fail_host_msg;
+               }
+               break;
+
+       case FC_BSG_HST_CT:
+               cmdlen += sizeof(struct fc_bsg_host_ct);
+               /* there better be xmt and rcv payloads */
+               if ((!job->request_payload.payload_len) ||
+                   (!job->reply_payload.payload_len)) {
+                       ret = -EINVAL;
+                       goto fail_host_msg;
+               }
+               break;
+
+       case FC_BSG_HST_VENDOR:
+               cmdlen += sizeof(struct fc_bsg_host_vendor);
+               if ((shost->hostt->vendor_id == 0L) ||
+                   (job->request->rqst_data.h_vendor.vendor_id !=
+                       shost->hostt->vendor_id)) {
+                       ret = -ESRCH;
+                       goto fail_host_msg;
+               }
+               break;
+
+       default:
+               ret = -EBADR;
+               goto fail_host_msg;
+       }
+
+       /* check if we really have all the request data needed */
+       if (job->request_len < cmdlen) {
+               ret = -ENOMSG;
+               goto fail_host_msg;
+       }
+
+       ret = i->f->bsg_request(job);
+       if (!ret)
+               return FC_DISPATCH_UNLOCKED;
+
+fail_host_msg:
+       /* return the errno failure code as the only status */
+       BUG_ON(job->reply_len < sizeof(uint32_t));
+       job->reply->result = ret;
+       job->reply_len = sizeof(uint32_t);
+       fc_bsg_jobdone(job);
+       return FC_DISPATCH_UNLOCKED;
+}
+
+
+/*
+ * fc_bsg_goose_queue - restart rport queue in case it was stopped
+ * @rport:     rport to be restarted
+ */
+static void
+fc_bsg_goose_queue(struct fc_rport *rport)
+{
+       int flagset;
+       unsigned long flags;
+
+       if (!rport->rqst_q)
+               return;
+
+       get_device(&rport->dev);
+
+       spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
+       flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
+                 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
+       if (flagset)
+               queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
+       __blk_run_queue(rport->rqst_q);
+       if (flagset)
+               queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
+       spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
+
+       put_device(&rport->dev);
+}
+
+
+/**
+ * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
+ * @q:         rport request queue
+ * @shost:     scsi host rport attached to
+ * @rport:     rport request destined to
+ * @job:       bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+                        struct fc_rport *rport, struct fc_bsg_job *job)
+{
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       int cmdlen = sizeof(uint32_t);  /* start with length of msgcode */
+       int ret;
+
+       /* Validate the rport command */
+       switch (job->request->msgcode) {
+       case FC_BSG_RPT_ELS:
+               cmdlen += sizeof(struct fc_bsg_rport_els);
+               goto check_bidi;
+
+       case FC_BSG_RPT_CT:
+               cmdlen += sizeof(struct fc_bsg_rport_ct);
+check_bidi:
+               /* there better be xmt and rcv payloads */
+               if ((!job->request_payload.payload_len) ||
+                   (!job->reply_payload.payload_len)) {
+                       ret = -EINVAL;
+                       goto fail_rport_msg;
+               }
+               break;
+       default:
+               ret = -EBADR;
+               goto fail_rport_msg;
+       }
+
+       /* check if we really have all the request data needed */
+       if (job->request_len < cmdlen) {
+               ret = -ENOMSG;
+               goto fail_rport_msg;
+       }
+
+       ret = i->f->bsg_request(job);
+       if (!ret)
+               return FC_DISPATCH_UNLOCKED;
+
+fail_rport_msg:
+       /* return the errno failure code as the only status */
+       BUG_ON(job->reply_len < sizeof(uint32_t));
+       job->reply->result = ret;
+       job->reply_len = sizeof(uint32_t);
+       fc_bsg_jobdone(job);
+       return FC_DISPATCH_UNLOCKED;
+}
+
+
+/**
+ * fc_bsg_request_handler - generic handler for bsg requests
+ * @q:         request queue to manage
+ * @shost:     Scsi_Host related to the bsg object
+ * @rport:     FC remote port related to the bsg object (optional)
+ * @dev:       device structure for bsg object
+ */
+static void
+fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
+                      struct fc_rport *rport, struct device *dev)
+{
+       struct request *req;
+       struct fc_bsg_job *job;
+       enum fc_dispatch_result ret;
+
+       if (!get_device(dev))
+               return;
+
+       while (!blk_queue_plugged(q)) {
+               if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
+                               break;
+
+               req = blk_fetch_request(q);
+               if (!req)
+                       break;
+
+               if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
+                       req->errors = -ENXIO;
+                       spin_unlock_irq(q->queue_lock);
+                       blk_end_request(req, -ENXIO, blk_rq_bytes(req));
+                       spin_lock_irq(q->queue_lock);
+                       continue;
+               }
+
+               spin_unlock_irq(q->queue_lock);
+
+               ret = fc_req_to_bsgjob(shost, rport, req);
+               if (ret) {
+                       req->errors = ret;
+                       blk_end_request(req, ret, blk_rq_bytes(req));
+                       spin_lock_irq(q->queue_lock);
+                       continue;
+               }
+
+               job = req->special;
+
+               /* check if we have the msgcode value at least */
+               if (job->request_len < sizeof(uint32_t)) {
+                       BUG_ON(job->reply_len < sizeof(uint32_t));
+                       job->reply->result = -ENOMSG;
+                       job->reply_len = sizeof(uint32_t);
+                       fc_bsg_jobdone(job);
+                       spin_lock_irq(q->queue_lock);
+                       continue;
+               }
+
+               /* the dispatch routines will unlock the queue_lock */
+               if (rport)
+                       ret = fc_bsg_rport_dispatch(q, shost, rport, job);
+               else
+                       ret = fc_bsg_host_dispatch(q, shost, job);
+
+               /* did dispatcher hit state that can't process any more */
+               if (ret == FC_DISPATCH_BREAK)
+                       break;
+
+               /* did the dispatcher release the lock */
+               if (ret == FC_DISPATCH_UNLOCKED)
+                       spin_lock_irq(q->queue_lock);
+       }
+
+       spin_unlock_irq(q->queue_lock);
+       put_device(dev);
+       spin_lock_irq(q->queue_lock);
+}
+
+
+/**
+ * fc_bsg_host_handler - handler for bsg requests for a fc host
+ * @q:         fc host request queue
+ */
+static void
+fc_bsg_host_handler(struct request_queue *q)
+{
+       struct Scsi_Host *shost = q->queuedata;
+
+       fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+}
+
+
+/**
+ * fc_bsg_rport_handler - handler for bsg requests for a fc rport
+ * @q:         rport request queue
+ */
+static void
+fc_bsg_rport_handler(struct request_queue *q)
+{
+       struct fc_rport *rport = q->queuedata;
+       struct Scsi_Host *shost = rport_to_shost(rport);
+
+       fc_bsg_request_handler(q, shost, rport, &rport->dev);
+}
+
+
+/**
+ * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
+ * @shost:     shost for fc_host
+ * @fc_host:   fc_host adding the structures to
+ */
+static int
+fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
+{
+       struct device *dev = &shost->shost_gendev;
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       struct request_queue *q;
+       int err;
+       char bsg_name[20];
+
+       fc_host->rqst_q = NULL;
+
+       if (!i->f->bsg_request)
+               return -ENOTSUPP;
+
+       snprintf(bsg_name, sizeof(bsg_name),
+                "fc_host%d", shost->host_no);
+
+       q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+       if (!q) {
+               printk(KERN_ERR "fc_host%d: bsg interface failed to "
+                               "initialize - no request queue\n",
+                                shost->host_no);
+               return -ENOMEM;
+       }
+
+       q->queuedata = shost;
+       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       blk_queue_softirq_done(q, fc_bsg_softirq_done);
+       blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+       blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
+
+       err = bsg_register_queue(q, dev, bsg_name, NULL);
+       if (err) {
+               printk(KERN_ERR "fc_host%d: bsg interface failed to "
+                               "initialize - register queue\n",
+                               shost->host_no);
+               blk_cleanup_queue(q);
+               return err;
+       }
+
+       fc_host->rqst_q = q;
+       return 0;
+}
+
+
+/**
+ * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
+ * @shost:     shost that rport is attached to
+ * @rport:     rport that the bsg hooks are being attached to
+ */
+static int
+fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
+{
+       struct device *dev = &rport->dev;
+       struct fc_internal *i = to_fc_internal(shost->transportt);
+       struct request_queue *q;
+       int err;
+
+       rport->rqst_q = NULL;
+
+       if (!i->f->bsg_request)
+               return -ENOTSUPP;
+
+       q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+       if (!q) {
+               printk(KERN_ERR "%s: bsg interface failed to "
+                               "initialize - no request queue\n",
+                                dev->kobj.name);
+               return -ENOMEM;
+       }
+
+       q->queuedata = rport;
+       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       blk_queue_softirq_done(q, fc_bsg_softirq_done);
+       blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+       blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
+
+       err = bsg_register_queue(q, dev, NULL, NULL);
+       if (err) {
+               printk(KERN_ERR "%s: bsg interface failed to "
+                               "initialize - register queue\n",
+                                dev->kobj.name);
+               blk_cleanup_queue(q);
+               return err;
+       }
+
+       rport->rqst_q = q;
+       return 0;
+}
+
+
+/**
+ * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
+ * @q: the request_queue that is to be torn down.
+ */
+static void
+fc_bsg_remove(struct request_queue *q)
+{
+       if (q) {
+               bsg_unregister_queue(q);
+               blk_cleanup_queue(q);
+       }
+}
+
+
 /* Original Author:  Martin Hicks */
 MODULE_AUTHOR("James Smart");
 MODULE_DESCRIPTION("FC Transport Attributes");
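
For reference, below is a minimal, hypothetical sketch of how a low-level driver might hook into the BSG interface this patch introduces, wiring bsg_request/bsg_timeout handlers into its fc_function_template (the fields the transport dereferences above as i->f->bsg_request and i->f->bsg_timeout). It is not part of the change; every "example_" name is made up for illustration, and a real driver would normally queue the command to its hardware and complete the job asynchronously from its completion handler rather than finishing it inline.

/* Hypothetical LLDD glue for the new FC BSG hooks (illustration only). */
#include <linux/errno.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>

static int example_bsg_request(struct fc_bsg_job *job)
{
	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/*
		 * A real driver would map job->request_payload /
		 * job->reply_payload to the hardware here; we complete
		 * the job immediately just to show the calling pattern.
		 */
		job->reply->result = 0;
		job->reply->reply_payload_rcv_len =
					job->reply_payload.payload_len;
		job->job_done(job);
		return 0;
	default:
		/*
		 * Returning an errno lets the transport fill in
		 * job->reply->result and finish the job for us.
		 */
		return -ENOSYS;
	}
}

static int example_bsg_timeout(struct fc_bsg_job *job)
{
	/* abort the request in the hardware; 0 == abort was issued */
	return 0;
}

static struct fc_function_template example_fc_transport_functions = {
	/* ... the usual show_*, rport and vport handlers ... */
	.bsg_request	= example_bsg_request,
	.bsg_timeout	= example_bsg_timeout,
};

Note that fc_bsg_host_dispatch() above only forwards FC_BSG_HST_VENDOR requests whose vendor_id matches shost->hostt->vendor_id, so the vendor path in this sketch also assumes the driver's scsi_host_template sets a non-zero vendor_id.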