[SCSI] convert SPI transport class to scsi_execute
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}
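
/*
 * Illustrative sketch (not part of this file): the usual trigger for
 * scsi_queue_insert() is a LLDD ->queuecommand() refusing a command
 * because its internal resources are exhausted.  The helper name
 * my_hw_full() below is hypothetical:
 *
 *      static int my_queuecommand(struct scsi_cmnd *cmd,
 *                                 void (*done)(struct scsi_cmnd *))
 *      {
 *              if (my_hw_full(cmd->device->host))
 *                      return SCSI_MLQUEUE_HOST_BUSY;
 *              ...issue the command, arrange for done(cmd) later...
 *              return 0;
 *      }
 *
 * On that SCSI_MLQUEUE_HOST_BUSY return, scsi_dispatch_cmd() calls
 * scsi_queue_insert(), which stalls the host via host_blocked and
 * requeues the request as above.
 */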

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by the
         * scsi_hosts[host].queuecommand() function; it in turn must call
         * the completion function for the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
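
/*
 * Illustrative sketch (assumed caller, not part of this file): an
 * asynchronous issue through scsi_do_req().  "sreq" is assumed to have
 * been obtained from scsi_allocate_request(); my_done() is a
 * hypothetical completion callback run when the command finishes.
 * Note the command buffer must be MAX_COMMAND_SIZE bytes, since the
 * memcpy above copies sizeof(sreq->sr_cmnd):
 *
 *      unsigned char scb[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
 *
 *      sreq->sr_data_direction = DMA_NONE;
 *      scsi_do_req(sreq, scb, NULL, 0, my_done, 10 * HZ, 3);
 *
 * Because of the head injection noted above, such requests bypass any
 * I/O already queued on the device, which is what quiesce relies on.
 */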

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);
        int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
        struct request *req;

        req = blk_get_request(sreq->sr_device->request_queue, write,
                              __GFP_WAIT);
        if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT)) {
                sreq->sr_result = DRIVER_ERROR << 24;
                blk_put_request(req);
                return;
        }

        req->flags |= REQ_NOMERGE;
        req->waiting = &wait;
        req->end_io = scsi_wait_req_end_io;
        req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
        req->sense = sreq->sr_sense_buffer;
        req->sense_len = 0;
        memcpy(req->cmd, cmnd, req->cmd_len);
        req->timeout = timeout;
        req->flags |= REQ_BLOCK_PC;
        req->rq_disk = NULL;
        blk_insert_request(sreq->sr_device->request_queue, req,
                           sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
        wait_for_completion(&wait);
        req->waiting = NULL;
        sreq->sr_result = req->errors;
        if (req->errors)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
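
/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * synchronous INQUIRY through scsi_wait_req().  "sreq" is assumed to
 * come from scsi_allocate_request(); error handling is omitted:
 *
 *      unsigned char scb[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *      unsigned char buf[36];
 *
 *      sreq->sr_data_direction = DMA_FROM_DEVICE;
 *      scsi_wait_req(sreq, scb, buf, sizeof(buf), 5 * HZ, 3);
 *      if (sreq->sr_result == 0)
 *              ...parse the inquiry data in buf...
 */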

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in jiffies
 * @retries:    number of times to retry request
 * @flags:      flags to OR into the request flags
 *
 * scsi_execute() returns the req->errors value, which is the
 * scsi_cmnd result field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->timeout = timeout;
        req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}

EXPORT_SYMBOL(scsi_execute);
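
/*
 * Illustrative sketch (assumed caller, not part of this file): this is
 * the shape of use the patch title refers to - a transport class
 * issuing a command synchronously via scsi_execute() instead of the
 * legacy scsi_request API.  The sense decoding step is optional:
 *
 *      unsigned char scb[6] = { TEST_UNIT_READY, };
 *      unsigned char sense[SCSI_SENSE_BUFFERSIZE];
 *      int result;
 *
 *      result = scsi_execute(sdev, scb, DMA_NONE, NULL, 0, sense,
 *                            10 * HZ, 3, 0);
 *      if (driver_byte(result) & DRIVER_SENSE)
 *              ...decode sense[] with scsi_normalize_sense()...
 *
 * REQ_SPECIAL is OR'd in above, so these commands are also allowed
 * through scsi_prep_fn() while a device is quiesced.
 */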

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        cmd->request->flags &= ~REQ_DONTPREP;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}
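
/*
 * Worked example of the leftover path above: a READ spanning 8 sectors
 * (4096 bytes) completes with bytes == 2048 and uptodate == 1.
 * end_that_request_chunk() retires the first four sectors and returns
 * non-zero because 2048 bytes remain, so with requeue set the request
 * goes back to the head of the queue via scsi_requeue_command() and is
 * reissued for the remaining four sectors.
 */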

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}
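
/*
 * Example of the pool mapping above: on a configuration with
 * SCSI_MAX_PHYS_SEGMENTS > 32, a command with use_sg == 40 hits the
 * "33 ... 64" case, so sglist_len == 3 and the table is served from
 * the "sgpool-64" mempool declared at the top of this file; the
 * matching scsi_free_sgtable(sgl, 3) returns it to the same pool.
 */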

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either finish off the
                 * rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                if (!(req->flags & REQ_SPECIAL))
                        printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                               "= 0x%x\n", cmd->device->host->host_no,
                               cmd->device->channel,
                               cmd->device->id,
                               cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk(KERN_ERR "scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
        BUG_ON(!blk_pc_request(cmd->request));
        scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state; don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones) */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL && req->special) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
                        if (specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }

                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 * (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).   This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                if (req->rq_disk) {
                        drv = *(struct scsi_driver **)req->rq_disk->private_data;
                        if (unlikely(!drv->init_command(cmd))) {
                                scsi_release_buffers(cmd);
                                scsi_put_command(cmd);
                                return BLKPREP_KILL;
                        }
                } else {
                        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
                        if (rq_data_dir(req) == WRITE)
                                cmd->sc_data_direction = DMA_TO_DEVICE;
                        else if (req->data_len)
                                cmd->sc_data_direction = DMA_FROM_DEVICE;
                        else
                                cmd->sc_data_direction = DMA_NONE;

                        cmd->transfersize = req->data_len;
                        cmd->allowed = 3;
                        cmd->timeout_per_command = req->timeout;
                        cmd->done = scsi_generic_done;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}
1242
1243 /*
1244  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1245  * return 0.
1246  *
1247  * Called with the queue_lock held.
1248  */
1249 static inline int scsi_dev_queue_ready(struct request_queue *q,
1250                                   struct scsi_device *sdev)
1251 {
1252         if (sdev->device_busy >= sdev->queue_depth)
1253                 return 0;
1254         if (sdev->device_busy == 0 && sdev->device_blocked) {
1255                 /*
1256                  * unblock after device_blocked iterates to zero
1257                  */
1258                 if (--sdev->device_blocked == 0) {
1259                         SCSI_LOG_MLQUEUE(3,
1260                                 printk("scsi%d (%d:%d) unblocking device at"
1261                                        " zero depth\n", sdev->host->host_no,
1262                                        sdev->id, sdev->lun));
1263                 } else {
1264                         blk_plug_device(q);
1265                         return 0;
1266                 }
1267         }
1268         if (sdev->device_blocked)
1269                 return 0;
1270
1271         return 1;
1272 }
1273
1274 /*
1275  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1276  * return 0. We must end up running the queue again whenever 0 is
1277  * returned, else IO can hang.
1278  *
1279  * Called with host_lock held.
1280  */
1281 static inline int scsi_host_queue_ready(struct request_queue *q,
1282                                    struct Scsi_Host *shost,
1283                                    struct scsi_device *sdev)
1284 {
1285         if (shost->shost_state == SHOST_RECOVERY)
1286                 return 0;
1287         if (shost->host_busy == 0 && shost->host_blocked) {
1288                 /*
1289                  * unblock after host_blocked iterates to zero
1290                  */
1291                 if (--shost->host_blocked == 0) {
1292                         SCSI_LOG_MLQUEUE(3,
1293                                 printk("scsi%d unblocking host at zero depth\n",
1294                                         shost->host_no));
1295                 } else {
1296                         blk_plug_device(q);
1297                         return 0;
1298                 }
1299         }
1300         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1301             shost->host_blocked || shost->host_self_blocked) {
1302                 if (list_empty(&sdev->starved_entry))
1303                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1304                 return 0;
1305         }
1306
1307         /* We're OK to process the command, so we can't be starved */
1308         if (!list_empty(&sdev->starved_entry))
1309                 list_del_init(&sdev->starved_entry);
1310
1311         return 1;
1312 }
1313
1314 /*
1315  * Kill requests for a dead device
1316  */
1317 static void scsi_kill_requests(request_queue_t *q)
1318 {
1319         struct request *req;
1320
1321         while ((req = elv_next_request(q)) != NULL) {
1322                 blkdev_dequeue_request(req);
1323                 req->flags |= REQ_QUIET;
1324                 while (end_that_request_first(req, 0, req->nr_sectors))
1325                         ;
1326                 end_that_request_last(req);
1327         }
1328 }
1329
1330 /*
1331  * Function:    scsi_request_fn()
1332  *
1333  * Purpose:     Main strategy routine for SCSI.
1334  *
1335  * Arguments:   q       - Pointer to actual queue.
1336  *
1337  * Returns:     Nothing
1338  *
1339  * Lock status: IO request lock assumed to be held when called.
1340  */
1341 static void scsi_request_fn(struct request_queue *q)
1342 {
1343         struct scsi_device *sdev = q->queuedata;
1344         struct Scsi_Host *shost;
1345         struct scsi_cmnd *cmd;
1346         struct request *req;
1347
1348         if (!sdev) {
1349                 printk("scsi: killing requests for dead queue\n");
1350                 scsi_kill_requests(q);
1351                 return;
1352         }
1353
1354         if(!get_device(&sdev->sdev_gendev))
1355                 /* We must be tearing the block queue down already */
1356                 return;
1357
1358         /*
1359          * To start with, we keep looping until the queue is empty, or until
1360          * the host is no longer able to accept any more requests.
1361          */
1362         shost = sdev->host;
1363         while (!blk_queue_plugged(q)) {
1364                 int rtn;
1365                 /*
1366                  * get next queueable request.  We do this early to make sure
1367                  * that the request is fully prepared even if we cannot 
1368                  * accept it.
1369                  */
1370                 req = elv_next_request(q);
1371                 if (!req || !scsi_dev_queue_ready(q, sdev))
1372                         break;
1373
1374                 if (unlikely(!scsi_device_online(sdev))) {
1375                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1376                                sdev->host->host_no, sdev->id, sdev->lun);
1377                         blkdev_dequeue_request(req);
1378                         req->flags |= REQ_QUIET;
1379                         while (end_that_request_first(req, 0, req->nr_sectors))
1380                                 ;
1381                         end_that_request_last(req);
1382                         continue;
1383                 }
1384
1385
1386                 /*
1387                  * Remove the request from the request list.
1388                  */
1389                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1390                         blkdev_dequeue_request(req);
1391                 sdev->device_busy++;
1392
1393                 spin_unlock(q->queue_lock);
1394                 spin_lock(shost->host_lock);
1395
1396                 if (!scsi_host_queue_ready(q, shost, sdev))
1397                         goto not_ready;
1398                 if (sdev->single_lun) {
1399                         if (scsi_target(sdev)->starget_sdev_user &&
1400                             scsi_target(sdev)->starget_sdev_user != sdev)
1401                                 goto not_ready;
1402                         scsi_target(sdev)->starget_sdev_user = sdev;
1403                 }
1404                 shost->host_busy++;
1405
1406                 /*
1407                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1408                  *              take the lock again.
1409                  */
1410                 spin_unlock_irq(shost->host_lock);
1411
1412                 cmd = req->special;
1413                 if (unlikely(cmd == NULL)) {
1414                         printk(KERN_CRIT "impossible request in %s.\n"
1415                                          "please mail a stack trace to "
1416                                          "linux-scsi@vger.kernel.org\n",
1417                                          __FUNCTION__);
1418                         BUG();
1419                 }
1420
1421                 /*
1422                  * Finally, initialize any error handling parameters, and set up
1423                  * the timers for timeouts.
1424                  */
1425                 scsi_init_cmd_errh(cmd);
1426
1427                 /*
1428                  * Dispatch the command to the low-level driver.
1429                  */
1430                 rtn = scsi_dispatch_cmd(cmd);
1431                 spin_lock_irq(q->queue_lock);
1432                 if (rtn) {
1433                         /* we're refusing the command; because of
1434                          * the way locks get dropped, we need to
1435                          * check here if plugging is required */
1436                         if (sdev->device_busy == 0)
1437                                 blk_plug_device(q);
1438
1439                         break;
1440                 }
1441         }
1442
1443         goto out;
1444
1445  not_ready:
1446         spin_unlock_irq(shost->host_lock);
1447
1448         /*
1449          * lock q, handle tag, requeue req, and decrement device_busy. We
1450          * must return with queue_lock held.
1451          *
1452          * Decrementing device_busy without checking it is OK, as all such
1453          * cases (host limits or settings) should run the queue at some
1454          * later time.
1455          */
1456         spin_lock_irq(q->queue_lock);
1457         blk_requeue_request(q, req);
1458         sdev->device_busy--;
1459         if (sdev->device_busy == 0)
1460                 blk_plug_device(q);
1461  out:
1462         /* must be careful here...if we trigger the ->remove() function
1463          * we cannot be holding the q lock */
1464         spin_unlock_irq(q->queue_lock);
1465         put_device(&sdev->sdev_gendev);
1466         spin_lock_irq(q->queue_lock);
1467 }
1468
1469 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1470 {
1471         struct device *host_dev;
1472         u64 bounce_limit = 0xffffffff;
1473
1474         if (shost->unchecked_isa_dma)
1475                 return BLK_BOUNCE_ISA;
1476         /*
1477          * Platforms with virtual-DMA translation
1478          * hardware have no practical limit.
1479          */
1480         if (!PCI_DMA_BUS_IS_PHYS)
1481                 return BLK_BOUNCE_ANY;
1482
1483         host_dev = scsi_get_device(shost);
1484         if (host_dev && host_dev->dma_mask)
1485                 bounce_limit = *host_dev->dma_mask;
1486
1487         return bounce_limit;
1488 }
1489 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
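
/*
 * Usage sketch: a driver that manages its own request queue can reuse the
 * host's DMA addressing limit, exactly as scsi_alloc_queue() does below.
 * The queue "q" is assumed to have been allocated by the caller.
 */
static void example_set_bounce_limit(struct Scsi_Host *shost,
                                     struct request_queue *q)
{
        /* bounce any page the host cannot address directly */
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
}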
1490
1491 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1492 {
1493         struct Scsi_Host *shost = sdev->host;
1494         struct request_queue *q;
1495
1496         q = blk_init_queue(scsi_request_fn, NULL);
1497         if (!q)
1498                 return NULL;
1499
1500         blk_queue_prep_rq(q, scsi_prep_fn);
1501
1502         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1503         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1504         blk_queue_max_sectors(q, shost->max_sectors);
1505         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1506         blk_queue_segment_boundary(q, shost->dma_boundary);
1507         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1508
1509         /*
1510          * ordered tags are superior to flush ordering
1511          */
1512         if (shost->ordered_tag)
1513                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1514         else if (shost->ordered_flush) {
1515                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1516                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1517                 q->end_flush_fn = scsi_end_flush_fn;
1518         }
1519
1520         if (!shost->use_clustering)
1521                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1522         return q;
1523 }
1524
1525 void scsi_free_queue(struct request_queue *q)
1526 {
1527         blk_cleanup_queue(q);
1528 }
1529
1530 /*
1531  * Function:    scsi_block_requests()
1532  *
1533  * Purpose:     Utility function used by low-level drivers to prevent further
1534  *              commands from being queued to the device.
1535  *
1536  * Arguments:   shost       - Host in question
1537  *
1538  * Returns:     Nothing
1539  *
1540  * Lock status: No locks are assumed held.
1541  *
1542  * Notes:       There is no timer nor any other means by which the requests
1543  *              get unblocked other than the low-level driver calling
1544  *              scsi_unblock_requests().
1545  */
1546 void scsi_block_requests(struct Scsi_Host *shost)
1547 {
1548         shost->host_self_blocked = 1;
1549 }
1550 EXPORT_SYMBOL(scsi_block_requests);
1551
1552 /*
1553  * Function:    scsi_unblock_requests()
1554  *
1555  * Purpose:     Utility function used by low-level drivers to allow further
1556  *              commands to be queued to the device.
1557  *
1558  * Arguments:   shost       - Host in question
1559  *
1560  * Returns:     Nothing
1561  *
1562  * Lock status: No locks are assumed held.
1563  *
1564  * Notes:       There is no timer nor any other means by which the requests
1565  *              get unblocked other than the low-level driver calling
1566  *              scsi_unblock_requests().
1567  *
1568  *              This is done as an API function so that changes to the
1569  *              internals of the scsi mid-layer won't require wholesale
1570  *              changes to drivers that use this feature.
1571  */
1572 void scsi_unblock_requests(struct Scsi_Host *shost)
1573 {
1574         shost->host_self_blocked = 0;
1575         scsi_run_host_queues(shost);
1576 }
1577 EXPORT_SYMBOL(scsi_unblock_requests);
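
/*
 * Usage sketch: the usual lld pattern around an operation that must not
 * see new commands.  The mdelay() is only a stand-in for hypothetical
 * driver-specific reset work.
 */
static void example_host_reset(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);     /* no further commands are queued */
        mdelay(100);                    /* stand-in: reset the hardware */
        scsi_unblock_requests(shost);   /* restart every queue on shost */
}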
1578
1579 int __init scsi_init_queue(void)
1580 {
1581         int i;
1582
1583         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1584                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1585                 int size = sgp->size * sizeof(struct scatterlist);
1586
1587                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1588                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1589                 if (!sgp->slab) {
1590                         /* a NULL slab would oops in mempool_create() */
1591                         panic("SCSI: can't init sg slab %s\n", sgp->name);
1592                 }
1593
1594                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1595                                 mempool_alloc_slab, mempool_free_slab,
1596                                 sgp->slab);
1597                 if (!sgp->pool) {
1598                         /* later scatterlist allocations rely on this pool */
1599                         panic("SCSI: can't init sg mempool %s\n", sgp->name);
1600                 }
1601         }
1602
1603         return 0;
1604 }
1605
1606 void scsi_exit_queue(void)
1607 {
1608         int i;
1609
1610         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1611                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1612                 mempool_destroy(sgp->pool);
1613                 kmem_cache_destroy(sgp->slab);
1614         }
1615 }
1616 /**
1617  *      scsi_mode_sense - issue a mode sense, falling back from ten to
1618  *              six bytes if necessary.
1619  *      @sdev:  SCSI device to be queried
1620  *      @dbd:   set if mode sense will allow block descriptors to be returned
1621  *      @modepage: mode page being requested
1622  *      @buffer: request buffer (may not be smaller than eight bytes)
1623  *      @len:   length of request buffer.
1624  *      @timeout: command timeout
1625  *      @retries: number of retries before failing
1626  *      @data: returns a structure abstracting the mode header data
1627  *      @sense: place to put sense data (or NULL if none is to be collected);
1628  *              must be SCSI_SENSE_BUFFERSIZE big.
1629  *
1630  *      Returns zero if successful; on success @data->header_length is
1631  *      set to the header offset (either 4 or 8, depending on whether a
1632  *      six or ten byte command was issued).
1633  **/
1634 int
1635 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1636                   unsigned char *buffer, int len, int timeout, int retries,
1637                   struct scsi_mode_data *data, char *sense) {
1638         unsigned char cmd[12];
1639         int use_10_for_ms;
1640         int header_length;
1641         int result;
1642         char *sense_buffer = NULL;
1643
1644         memset(data, 0, sizeof(*data));
1645         memset(cmd, 0, sizeof(cmd));
1646         cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1647         cmd[2] = modepage;
1648
1649         if (!sense) {
1650                 sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
1651                 if (!sense_buffer) {
1652                         dev_printk(KERN_ERR, &sdev->sdev_gendev, "failed to allocate sense buffer\n");
1653                         return 0;
1654                 }
1655                 sense = sense_buffer;
1656         }
1657  retry:
1658         use_10_for_ms = sdev->use_10_for_ms;
1659
1660         if (use_10_for_ms) {
1661                 if (len < 8)
1662                         len = 8;
1663
1664                 cmd[0] = MODE_SENSE_10;
1665                 cmd[8] = len;
1666                 header_length = 8;
1667         } else {
1668                 if (len < 4)
1669                         len = 4;
1670
1671                 cmd[0] = MODE_SENSE;
1672                 cmd[4] = len;
1673                 header_length = 4;
1674         }
1675
1676         memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
1677
1678         memset(buffer, 0, len);
1679
1680         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1681                                   sense, timeout, retries);
1682
1683         /* This code looks awful: what it's doing is making sure an
1684          * ILLEGAL REQUEST sense return identifies the actual command
1685          * byte as the problem.  MODE_SENSE commands can return
1686          * ILLEGAL REQUEST if the code page isn't supported */
1687
1688         if (use_10_for_ms && !scsi_status_is_good(result) &&
1689             (driver_byte(result) & DRIVER_SENSE)) {
1690                 struct scsi_sense_hdr sshdr;
1691
1692                 if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
1693                         if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1694                             (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1695                                 /* 
1696                                  * Invalid command operation code
1697                                  */
1698                                 sdev->use_10_for_ms = 0;
1699                                 goto retry;
1700                         }
1701                 }
1702         }
1703
1704         if (scsi_status_is_good(result)) {
1705                 data->header_length = header_length;
1706                 if (use_10_for_ms) {
1707                         data->length = buffer[0]*256 + buffer[1] + 2;
1708                         data->medium_type = buffer[2];
1709                         data->device_specific = buffer[3];
1710                         data->longlba = buffer[4] & 0x01;
1711                         data->block_descriptor_length = buffer[6]*256
1712                                 + buffer[7];
1713                 } else {
1714                         data->length = buffer[0] + 1;
1715                         data->medium_type = buffer[1];
1716                         data->device_specific = buffer[2];
1717                         data->block_descriptor_length = buffer[3];
1718                 }
1719         }
1720
1721         kfree(sense_buffer);
1722         return result;
1723 }
1724 EXPORT_SYMBOL(scsi_mode_sense);
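
/*
 * Usage sketch: reading the caching mode page (0x08) roughly the way sd
 * does.  The buffer size, timeout and retry count are illustrative
 * assumptions, not values taken from this file.
 */
static int example_read_wce(struct scsi_device *sdev)
{
        unsigned char buffer[128];
        struct scsi_mode_data data;
        int res;

        res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
                              5 * HZ, 3, &data, NULL);
        if (scsi_status_is_good(res)) {
                /* page data follows the header and block descriptors */
                unsigned char *page = buffer + data.header_length +
                                      data.block_descriptor_length;
                return page[2] & 0x04;  /* WCE bit of the caching page */
        }
        return -EIO;
}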
1725
1726 int
1727 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1728 {
1729         char cmd[] = {
1730                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1731         };
1732         char sense[SCSI_SENSE_BUFFERSIZE];
1733         int result;
1734         
1735         result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sense,
1736                                   timeout, retries);
1737
1738         if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1739                 struct scsi_sense_hdr sshdr;
1740
1741                 if ((scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE,
1742                                           &sshdr)) &&
1743                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1744                      (sshdr.sense_key == NOT_READY))) {
1745                         sdev->changed = 1;
1746                         result = 0;
1747                 }
1748         }
1749         return result;
1750 }
1751 EXPORT_SYMBOL(scsi_test_unit_ready);
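
/*
 * Usage sketch: polling a removable device for readiness.  Timeout and
 * retries are illustrative; the -ENOMEDIUM policy is hypothetical.
 */
static int example_unit_ready(struct scsi_device *sdev)
{
        int ret = scsi_test_unit_ready(sdev, 5 * HZ, 3);

        /* for removable media a change is signalled through sdev->changed
         * rather than through the return value */
        if (ret == 0 && sdev->changed)
                return -ENOMEDIUM;
        return ret;
}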
1752
1753 /**
1754  *      scsi_device_set_state - Take the given device through the device
1755  *              state model.
1756  *      @sdev:  scsi device to change the state of.
1757  *      @state: state to change to.
1758  *
1759  *      Returns zero if successful or -EINVAL if the requested
1760  *      transition is illegal.
1761  **/
1762 int
1763 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1764 {
1765         enum scsi_device_state oldstate = sdev->sdev_state;
1766
1767         if (state == oldstate)
1768                 return 0;
1769
1770         switch (state) {
1771         case SDEV_CREATED:
1772                 /* There are no legal states that come back to
1773                  * created.  This is the manually initialised start
1774                  * state */
1775                 goto illegal;
1776                         
1777         case SDEV_RUNNING:
1778                 switch (oldstate) {
1779                 case SDEV_CREATED:
1780                 case SDEV_OFFLINE:
1781                 case SDEV_QUIESCE:
1782                 case SDEV_BLOCK:
1783                         break;
1784                 default:
1785                         goto illegal;
1786                 }
1787                 break;
1788
1789         case SDEV_QUIESCE:
1790                 switch (oldstate) {
1791                 case SDEV_RUNNING:
1792                 case SDEV_OFFLINE:
1793                         break;
1794                 default:
1795                         goto illegal;
1796                 }
1797                 break;
1798
1799         case SDEV_OFFLINE:
1800                 switch (oldstate) {
1801                 case SDEV_CREATED:
1802                 case SDEV_RUNNING:
1803                 case SDEV_QUIESCE:
1804                 case SDEV_BLOCK:
1805                         break;
1806                 default:
1807                         goto illegal;
1808                 }
1809                 break;
1810
1811         case SDEV_BLOCK:
1812                 switch (oldstate) {
1813                 case SDEV_CREATED:
1814                 case SDEV_RUNNING:
1815                         break;
1816                 default:
1817                         goto illegal;
1818                 }
1819                 break;
1820
1821         case SDEV_CANCEL:
1822                 switch (oldstate) {
1823                 case SDEV_CREATED:
1824                 case SDEV_RUNNING:
1825                 case SDEV_OFFLINE:
1826                 case SDEV_BLOCK:
1827                         break;
1828                 default:
1829                         goto illegal;
1830                 }
1831                 break;
1832
1833         case SDEV_DEL:
1834                 switch (oldstate) {
1835                 case SDEV_CANCEL:
1836                         break;
1837                 default:
1838                         goto illegal;
1839                 }
1840                 break;
1841
1842         }
1843         sdev->sdev_state = state;
1844         return 0;
1845
1846  illegal:
1847         SCSI_LOG_ERROR_RECOVERY(1, 
1848                                 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1849                                            "Illegal state transition %s->%s\n",
1850                                            scsi_device_state_name(oldstate),
1851                                            scsi_device_state_name(state))
1852                                 );
1853         return -EINVAL;
1854 }
1855 EXPORT_SYMBOL(scsi_device_set_state);
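
/*
 * Usage sketch: callers must check the return value, since only the
 * transitions enumerated above are legal.  Taking a device offline is
 * one example of a transition that is legal from most states.
 */
static void example_take_offline(struct scsi_device *sdev)
{
        if (scsi_device_set_state(sdev, SDEV_OFFLINE))
                dev_printk(KERN_WARNING, &sdev->sdev_gendev,
                           "offline transition refused\n");
}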
1856
1857 /**
1858  *      scsi_device_quiesce - Block user issued commands.
1859  *      @sdev:  scsi device to quiesce.
1860  *
1861  *      This works by trying to transition to the SDEV_QUIESCE state
1862  *      (which must be a legal transition).  When the device is in this
1863  *      state, only special requests will be accepted, all others will
1864  *      be deferred.  Since special requests may also be requeued requests,
1865  *      a successful return doesn't guarantee the device will be 
1866  *      totally quiescent.
1867  *
1868  *      Must be called with user context, may sleep.
1869  *
1870  *      Returns zero if successful or an error if not.
1871  **/
1872 int
1873 scsi_device_quiesce(struct scsi_device *sdev)
1874 {
1875         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1876         if (err)
1877                 return err;
1878
1879         scsi_run_queue(sdev->request_queue);
1880         while (sdev->device_busy) {
1881                 msleep_interruptible(200);
1882                 scsi_run_queue(sdev->request_queue);
1883         }
1884         return 0;
1885 }
1886 EXPORT_SYMBOL(scsi_device_quiesce);
1887
1888 /**
1889  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1890  *      @sdev:  scsi device to resume.
1891  *
1892  *      Moves the device from quiesced back to running and restarts the
1893  *      queues.
1894  *
1895  *      Must be called with user context, may sleep.
1896  **/
1897 void
1898 scsi_device_resume(struct scsi_device *sdev)
1899 {
1900         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1901                 return;
1902         scsi_run_queue(sdev->request_queue);
1903 }
1904 EXPORT_SYMBOL(scsi_device_resume);
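
/*
 * Usage sketch: the quiesce/resume pair as a transport class might use
 * it around domain validation.  The quiesced work itself is elided.
 */
static int example_quiesced_work(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);
        if (err)
                return err;     /* transition refused, nothing to undo */

        /* ... issue internally generated commands; user I/O is deferred ... */

        scsi_device_resume(sdev);
        return 0;
}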
1905
1906 static void
1907 device_quiesce_fn(struct scsi_device *sdev, void *data)
1908 {
1909         scsi_device_quiesce(sdev);
1910 }
1911
1912 void
1913 scsi_target_quiesce(struct scsi_target *starget)
1914 {
1915         starget_for_each_device(starget, NULL, device_quiesce_fn);
1916 }
1917 EXPORT_SYMBOL(scsi_target_quiesce);
1918
1919 static void
1920 device_resume_fn(struct scsi_device *sdev, void *data)
1921 {
1922         scsi_device_resume(sdev);
1923 }
1924
1925 void
1926 scsi_target_resume(struct scsi_target *starget)
1927 {
1928         starget_for_each_device(starget, NULL, device_resume_fn);
1929 }
1930 EXPORT_SYMBOL(scsi_target_resume);
1931
1932 /**
1933  * scsi_internal_device_block - internal function to put a device
1934  *                              temporarily into the SDEV_BLOCK state
1935  * @sdev:       device to block
1936  *
1937  * Used by scsi lld's to temporarily stop all scsi commands on
1938  * the specified device.  May be called from interrupt or normal
1939  * process context.
1940  *
1941  * Returns zero if successful or error if not
1942  *
1943  * Notes:       
1944  *      This routine transitions the device to the SDEV_BLOCK state
1945  *      (which must be a legal transition).  When the device is in this
1946  *      state, all commands are deferred until the scsi lld reenables
1947  *      state, all commands are deferred until the scsi lld reenables
1948  *      the device with scsi_internal_device_unblock().
1949  **/
1950 int
1951 scsi_internal_device_block(struct scsi_device *sdev)
1952 {
1953         request_queue_t *q = sdev->request_queue;
1954         unsigned long flags;
1955         int err = 0;
1956
1957         err = scsi_device_set_state(sdev, SDEV_BLOCK);
1958         if (err)
1959                 return err;
1960
1961         /* 
1962          * The device has transitioned to SDEV_BLOCK.  Stop the
1963          * block layer from calling the midlayer with this device's
1964          * request queue. 
1965          */
1966         spin_lock_irqsave(q->queue_lock, flags);
1967         blk_stop_queue(q);
1968         spin_unlock_irqrestore(q->queue_lock, flags);
1969
1970         return 0;
1971 }
1972 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
1973  
1974 /**
1975  * scsi_internal_device_unblock - resume a device after a block request
1976  * @sdev:       device to resume
1977  *
1978  * Called by scsi lld's or the midlayer to restart the device queue
1979  * for the previously suspended scsi device.  Called from interrupt or
1980  * normal process context.
1981  *
1982  * Returns zero if successful or error if not.
1983  *
1984  * Notes:       
1985  *      This routine transitions the device to the SDEV_RUNNING state
1986  *      (which must be a legal transition) allowing the midlayer to
1987  *      goose the queue for this device.  This routine assumes the 
1988  *      host_lock is held upon entry.
1989  **/
1990 int
1991 scsi_internal_device_unblock(struct scsi_device *sdev)
1992 {
1993         request_queue_t *q = sdev->request_queue; 
1994         int err;
1995         unsigned long flags;
1996         
1997         /* 
1998          * Try to transition the scsi device to SDEV_RUNNING
1999          * and goose the device queue if successful.  
2000          */
2001         err = scsi_device_set_state(sdev, SDEV_RUNNING);
2002         if (err)
2003                 return err;
2004
2005         spin_lock_irqsave(q->queue_lock, flags);
2006         blk_start_queue(q);
2007         spin_unlock_irqrestore(q->queue_lock, flags);
2008
2009         return 0;
2010 }
2011 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
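
/*
 * Usage sketch: an lld suspending a device on a transient fault and
 * restarting it on recovery.  Both helpers assume host_lock is held on
 * entry, so it is taken here; the fault trigger is hypothetical.
 */
static void example_lld_fault(struct scsi_device *sdev, int recovered)
{
        unsigned long flags;

        spin_lock_irqsave(sdev->host->host_lock, flags);
        if (recovered)
                scsi_internal_device_unblock(sdev);
        else
                scsi_internal_device_block(sdev);
        spin_unlock_irqrestore(sdev->host->host_lock, flags);
}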
2012
2013 static void
2014 device_block(struct scsi_device *sdev, void *data)
2015 {
2016         scsi_internal_device_block(sdev);
2017 }
2018
2019 static int
2020 target_block(struct device *dev, void *data)
2021 {
2022         if (scsi_is_target_device(dev))
2023                 starget_for_each_device(to_scsi_target(dev), NULL,
2024                                         device_block);
2025         return 0;
2026 }
2027
2028 void
2029 scsi_target_block(struct device *dev)
2030 {
2031         if (scsi_is_target_device(dev))
2032                 starget_for_each_device(to_scsi_target(dev), NULL,
2033                                         device_block);
2034         else
2035                 device_for_each_child(dev, NULL, target_block);
2036 }
2037 EXPORT_SYMBOL_GPL(scsi_target_block);
2038
2039 static void
2040 device_unblock(struct scsi_device *sdev, void *data)
2041 {
2042         scsi_internal_device_unblock(sdev);
2043 }
2044
2045 static int
2046 target_unblock(struct device *dev, void *data)
2047 {
2048         if (scsi_is_target_device(dev))
2049                 starget_for_each_device(to_scsi_target(dev), NULL,
2050                                         device_unblock);
2051         return 0;
2052 }
2053
2054 void
2055 scsi_target_unblock(struct device *dev)
2056 {
2057         if (scsi_is_target_device(dev))
2058                 starget_for_each_device(to_scsi_target(dev), NULL,
2059                                         device_unblock);
2060         else
2061                 device_for_each_child(dev, NULL, target_unblock);
2062 }
2063 EXPORT_SYMBOL_GPL(scsi_target_unblock);
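
/*
 * Usage sketch: a transport class blocking a target while its link is
 * down and unblocking it on recovery, roughly what a fabric transport
 * would do.  "dev" is the target's (or a parent's) generic device.
 */
static void example_link_event(struct device *dev, int link_up)
{
        if (link_up)
                scsi_target_unblock(dev);       /* back to SDEV_RUNNING */
        else
                scsi_target_block(dev);         /* defer all commands */
}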