diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5bf8be2..0c2c73b 100644
@@ -124,33 +124,22 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
 enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
        struct scsi_cmnd *scmd = req->special;
-       enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
        enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 
        scsi_log_completion(scmd, TIMEOUT_ERROR);
 
        if (scmd->device->host->transportt->eh_timed_out)
-               eh_timed_out = scmd->device->host->transportt->eh_timed_out;
+               rtn = scmd->device->host->transportt->eh_timed_out(scmd);
        else if (scmd->device->host->hostt->eh_timed_out)
-               eh_timed_out = scmd->device->host->hostt->eh_timed_out;
-       else
-               eh_timed_out = NULL;
+               rtn = scmd->device->host->hostt->eh_timed_out(scmd);
 
-       if (eh_timed_out)
-               rtn = eh_timed_out(scmd);
-               switch (rtn) {
-               case BLK_EH_NOT_HANDLED:
-                       break;
-               default:
-                       return rtn;
-               }
-
-       if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
+       if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+                    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
                scmd->result |= DID_TIME_OUT << 16;
-               return BLK_EH_HANDLED;
+               rtn = BLK_EH_HANDLED;
        }
 
-       return BLK_EH_NOT_HANDLED;
+       return rtn;
 }
 
 /**
@@ -932,8 +921,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
                int i, rtn = NEEDS_RETRY;
 
                for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
-                       rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
-                                               scmd->device->timeout, 0);
+                       rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
 
                if (rtn == SUCCESS)
                        return 0;
@@ -1065,10 +1053,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
                                struct list_head *done_q)
 {
        struct scsi_cmnd *scmd, *tgtr_scmd, *next;
-       unsigned int id;
+       unsigned int id = 0;
        int rtn;
 
-       for (id = 0; id <= shost->max_id; id++) {
+       do {
                tgtr_scmd = NULL;
                list_for_each_entry(scmd, work_q, eh_entry) {
                        if (id == scmd_id(scmd)) {
@@ -1076,8 +1064,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
                                break;
                        }
                }
+               if (!tgtr_scmd) {
+                       /* not one exactly equal; find the next highest */
+                       list_for_each_entry(scmd, work_q, eh_entry) {
+                               if (scmd_id(scmd) > id &&
+                                   (!tgtr_scmd ||
+                                    scmd_id(tgtr_scmd) > scmd_id(scmd)))
+                                               tgtr_scmd = scmd;
+                       }
+               }
                if (!tgtr_scmd)
-                       continue;
+                       /* no more commands, that's it */
+                       break;
 
                SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
                                                  "to target %d\n",
@@ -1096,7 +1094,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
                                                          " failed target: "
                                                          "%d\n",
                                                          current->comm, id));
-       }
+               id++;
+       } while (id != 0);
 
        return list_empty(work_q);
 }
@@ -1219,6 +1218,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
 }
 
 /**
+ * scsi_noretry_cmd - determine if command should be failed fast
+ * @scmd:      SCSI cmd to examine.
+ */
+int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+{
+       switch (host_byte(scmd->result)) {
+       case DID_OK:
+               break;
+       case DID_BUS_BUSY:
+               return blk_failfast_transport(scmd->request);
+       case DID_PARITY:
+               return blk_failfast_dev(scmd->request);
+       case DID_ERROR:
+               if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+                   status_byte(scmd->result) == RESERVATION_CONFLICT)
+                       return 0;
+               /* fall through */
+       case DID_SOFT_ERROR:
+               return blk_failfast_driver(scmd->request);
+       }
+
+       switch (status_byte(scmd->result)) {
+       case CHECK_CONDITION:
+               /*
+                * assume caller has checked sense and determined
+                * the check condition was retryable.
+                */
+               return blk_failfast_dev(scmd->request);
+       }
+
+       return 0;
+}
+
+/**
  * scsi_decide_disposition - Disposition a cmd on return from LLD.
  * @scmd:      SCSI cmd to examine.
  *
@@ -1295,9 +1328,10 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
                 * LLD/transport was disrupted during processing of the IO.
                 * The transport class is now blocked/blocking,
                 * and the transport will decide what to do with the IO
-                * based on its timers and recovery capabilities.
+                * based on its timers and recovery capabilities if
+                * there are enough retries.
                 */
-               return ADD_TO_MLQUEUE;
+               goto maybe_retry;
        case DID_TRANSPORT_FAILFAST:
                /*
                 * The transport decided to failfast the IO (most likely
@@ -1360,8 +1394,9 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
                return ADD_TO_MLQUEUE;
        case GOOD:
        case COMMAND_TERMINATED:
-       case TASK_ABORTED:
                return SUCCESS;
+       case TASK_ABORTED:
+               goto maybe_retry;
        case CHECK_CONDITION:
                rtn = scsi_check_sense(scmd);
                if (rtn == NEEDS_RETRY)
@@ -1396,7 +1431,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
         * even if the request is marked fast fail, we still requeue
         * for queue congestion conditions (QUEUE_FULL or BUSY) */
        if ((++scmd->retries) <= scmd->allowed
-           && !blk_noretry_request(scmd->request)) {
+           && !scsi_noretry_cmd(scmd)) {
                return NEEDS_RETRY;
        } else {
                /*
@@ -1406,6 +1441,11 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
        }
 }
 
+static void eh_lock_door_done(struct request *req, int uptodate)
+{
+       __blk_put_request(req->q, req);
+}
+
 /**
  * scsi_eh_lock_door - Prevent medium removal for the specified device
  * @sdev:      SCSI device to prevent medium removal
@@ -1428,19 +1468,28 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
  */
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
-       unsigned char cmnd[MAX_COMMAND_SIZE];
+       struct request *req;
 
-       cmnd[0] = ALLOW_MEDIUM_REMOVAL;
-       cmnd[1] = 0;
-       cmnd[2] = 0;
-       cmnd[3] = 0;
-       cmnd[4] = SCSI_REMOVAL_PREVENT;
-       cmnd[5] = 0;
+       req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+       if (!req)
+               return;
 
-       scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
-                          5, NULL, NULL, GFP_KERNEL);
-}
+       req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+       req->cmd[1] = 0;
+       req->cmd[2] = 0;
+       req->cmd[3] = 0;
+       req->cmd[4] = SCSI_REMOVAL_PREVENT;
+       req->cmd[5] = 0;
 
+       req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+
+       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       req->cmd_flags |= REQ_QUIET;
+       req->timeout = 10 * HZ;
+       req->retries = 5;
+
+       blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+}
 
 /**
  * scsi_restart_operations - restart io operations to the specified host.
@@ -1521,7 +1570,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
        list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
                list_del_init(&scmd->eh_entry);
                if (scsi_device_online(scmd->device) &&
-                   !blk_noretry_request(scmd->request) &&
+                   !scsi_noretry_cmd(scmd) &&
                    (++scmd->retries <= scmd->allowed)) {
                        SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
                                                          " retry cmd: %p\n",
@@ -1603,7 +1652,7 @@ int scsi_error_handler(void *data)
         * We use TASK_INTERRUPTIBLE so that the thread is not
         * counted against the load average as a running process.
         * We never actually get interrupted because kthread_run
-        * disables singal delivery for the created thread.
+        * disables signal delivery for the created thread.
         */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {