[SCSI] qla2xxx: Support for asynchronous TM and Marker IOCBs.
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a895a0e..0681378 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -834,7 +834,7 @@ static ssize_t
 store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       int val;
+       unsigned long val;
        struct fc_rport *rport = transport_class_to_rport(dev);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -848,6 +848,12 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (val > UINT_MAX)
+               return -EINVAL;
+
+       /*
         * If fast_io_fail is off we have to cap
         * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
         */
@@ -2865,7 +2871,7 @@ void
 fc_remote_port_delete(struct fc_rport  *rport)
 {
        struct Scsi_Host *shost = rport_to_shost(rport);
-       int timeout = rport->dev_loss_tmo;
+       unsigned long timeout = rport->dev_loss_tmo;
        unsigned long flags;
 
        /*
@@ -3191,23 +3197,33 @@ fc_scsi_scan_rport(struct work_struct *work)
  *
  * This routine can be called from a FC LLD scsi_eh callback. It
  * blocks the scsi_eh thread until the fc_rport leaves the
- * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh
- * failing recovery actions for blocked rports which would lead to
- * offlined SCSI devices.
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ *         FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
+ *         passed back to scsi_eh.
  */
-void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 {
        struct Scsi_Host *shost = cmnd->device->host;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        unsigned long flags;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+       while (rport->port_state == FC_PORTSTATE_BLOCKED &&
+              !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                msleep(1000);
                spin_lock_irqsave(shost->host_lock, flags);
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
+
+       if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+               return FAST_IO_FAIL;
+
+       return 0;
 }
 EXPORT_SYMBOL(fc_block_scsi_eh);
 
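With the int return type, an FC LLD's scsi_eh callbacks are expected to hand
FAST_IO_FAIL back to scsi_eh rather than keep retrying recovery actions for a
blocked rport whose fast_io_fail_tmo has already fired. A hedged sketch of how
a driver's device-reset handler might consume the new return value; only
fc_block_scsi_eh() comes from this file, and lld_issue_device_reset() is a
made-up placeholder rather than a real API:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_transport_fc.h>

    /* Sketch of an LLD eh_device_reset_handler using fc_block_scsi_eh(). */
    static int lld_eh_device_reset_handler(struct scsi_cmnd *cmnd)
    {
            int ret;

            /* Sleep until the rport unblocks or fast_io_fail_tmo fires. */
            ret = fc_block_scsi_eh(cmnd);
            if (ret == FAST_IO_FAIL)
                    return ret;     /* let scsi_eh fail the I/O fast */

            return lld_issue_device_reset(cmnd);    /* hypothetical helper */
    }
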
@@ -3853,7 +3869,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
                if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
                        req->errors = -ENXIO;
                        spin_unlock_irq(q->queue_lock);
-                       blk_end_request(req, -ENXIO, blk_rq_bytes(req));
+                       blk_end_request_all(req, -ENXIO);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }
@@ -3863,7 +3879,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
                ret = fc_req_to_bsgjob(shost, rport, req);
                if (ret) {
                        req->errors = ret;
-                       blk_end_request(req, ret, blk_rq_bytes(req));
+                       blk_end_request_all(req, ret);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }
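
On the two bsg hunks: blk_end_request_all() completes the request in full with
the given error, so for a request that is being failed outright it does the same
work as the old blk_end_request(req, err, blk_rq_bytes(req)) call while stating
the intent more directly. It also acquires the queue lock internally, which is
why the handler drops q->queue_lock around the call. A small sketch of that
error path; fail_fc_bsg_request() is a hypothetical wrapper, not part of this
file:

    #include <linux/blkdev.h>

    /*
     * Illustrative only: fail a fc_bsg request outright.  Must be called
     * without q->queue_lock held, since blk_end_request_all() takes it.
     */
    static void fail_fc_bsg_request(struct request *req, int err)
    {
            req->errors = err;
            blk_end_request_all(req, err);  /* completes all bytes of req */
    }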