/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

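/*
 * Illustrative sketch (not part of the driver): how a scatterlist
 * segment count maps onto the sgpool buckets declared above.  A
 * command needing n segments is served from the smallest pool whose
 * size covers n, e.g. n = 12 comes from "sgpool-16".  The helper
 * below is hypothetical and compiled out; it mirrors the switch in
 * scsi_alloc_sgtable() further down.
 */
#if 0
static int example_sg_pool_index(unsigned int nents)
{
        /* bucket boundaries mirror the SP() table above; nents >= 1 */
        if (nents <= 8)
                return 0;       /* sgpool-8 */
        if (nents <= 16)
                return 1;       /* sgpool-16 */
        if (nents <= 32)
                return 2;       /* sgpool-32 */
        return -1;              /* larger pools only if configured in */
}
#endif
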

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

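/*
 * Illustrative sketch (not part of the driver): the kind of
 * ioctl-path caller the note above describes.  The name is
 * hypothetical and the block is compiled out.
 */
#if 0
static int example_inject(struct scsi_request *sreq)
{
        /* head insertion keeps ioctl commands ahead of queued I/O */
        return scsi_insert_special_req(sreq, 1);
}
#endif
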
static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}

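/*
 * Illustrative sketch (not part of the driver): how the two `reason'
 * codes above are typically produced by a low-level driver's
 * queuecommand().  All names and conditions are hypothetical; the
 * block is compiled out.
 */
#if 0
static int example_queuecommand(struct scsi_cmnd *cmd,
                                void (*done)(struct scsi_cmnd *))
{
        int ring_full = 0;      /* adapter-specific condition */
        int lun_queue_full = 0; /* device returned QUEUE FULL */

        if (ring_full)
                return SCSI_MLQUEUE_HOST_BUSY;   /* midlayer requeues cmd */
        if (lun_queue_full)
                return SCSI_MLQUEUE_DEVICE_BUSY;
        /* ... hand cmd to the hardware, call done() on completion ... */
        return 0;
}
#endif
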
/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the scsi_hosts[host].queuecommand() function; it in turn must
         * call the completion function for the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

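/*
 * Illustrative sketch (not part of the driver): an upper-level driver
 * issuing an asynchronous INQUIRY through scsi_do_req() with a
 * private completion callback.  Note the CDB array is sized
 * MAX_COMMAND_SIZE because scsi_do_req() copies sizeof(sr_cmnd)
 * bytes.  Names are hypothetical; compiled out.
 */
#if 0
static void example_done(struct scsi_cmnd *cmd)
{
        /* runs from scsi_done context once the command finishes */
}

static void example_async_inquiry(struct scsi_request *sreq, void *buf)
{
        unsigned char cmnd[MAX_COMMAND_SIZE] = { INQUIRY, 0, 0, 0, 36, 0 };

        scsi_do_req(sreq, cmnd, buf, 36, example_done, 10 * HZ, 3);
}
#endif
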
static void scsi_wait_done(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        struct request_queue *q = cmd->device->request_queue;
        unsigned long flags;

        req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (req->waiting)
                complete(req->waiting);
}

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);

        sreq->sr_request->waiting = &wait;
        sreq->sr_request->rq_status = RQ_SCSI_BUSY;
        sreq->sr_request->end_io = scsi_wait_req_end_io;
        scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
                        timeout, retries);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        __scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);

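/*
 * Illustrative sketch (not part of the driver): the synchronous
 * variant, here a TEST UNIT READY that sleeps in scsi_wait_req()
 * until the command completes or times out.  Hypothetical name;
 * compiled out.
 */
#if 0
static int example_test_unit_ready(struct scsi_request *sreq)
{
        unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };

        scsi_wait_req(sreq, cmnd, NULL, 0, 10 * HZ, 3);
        return sreq->sr_result;
}
#endif
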
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     1 on success
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

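/*
 * Illustrative sketch (not part of the driver): the save/restore
 * pairing between scsi_init_cmd_errh() and scsi_setup_cmd_retry().
 * Error handling may overwrite the command (e.g. to run a REQUEST
 * SENSE); afterwards the saved fields are put back before the retry.
 * Hypothetical name; compiled out.
 */
#if 0
static void example_eh_retry(struct scsi_cmnd *cmd)
{
        scsi_init_cmd_errh(cmd);        /* snapshots cmnd, use_sg, ... */
        /* ... error handler runs substitute commands on cmd ... */
        scsi_setup_cmd_retry(cmd);      /* restores the snapshot */
}
#endif
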
void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        cmd->request->flags &= ~REQ_DONTPREP;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        if (sgl)
                memset(sgl, 0, sgp->size);
        return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}

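/*
 * Illustrative sketch (not part of the driver): the alloc/free
 * pairing.  cmd->use_sg is set before allocation; cmd->sglist_len
 * records which pool the table came from so the free side can return
 * it there.  Hypothetical name; compiled out.
 */
#if 0
static int example_sg_setup(struct scsi_cmnd *cmd, int nseg)
{
        struct scatterlist *sgl;

        cmd->use_sg = nseg;
        sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (!sgl)
                return -ENOMEM;
        /* ... map the request into sgl, issue the command ... */
        scsi_free_sgtable(sgl, cmd->sglist_len);
        return 0;
}
#endif
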
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer,  len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either finish off the
                 * rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                       "= 0x%x\n", cmd->device->host->host_no,
                       cmd->device->channel,
                       cmd->device->id,
                       cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

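/*
 * Illustrative sketch (not part of the driver): what the three
 * scsi_init_io() outcomes mean to a caller on the prep path.
 * Hypothetical name; compiled out.
 */
#if 0
static int example_prep_outcome(struct scsi_cmnd *cmd)
{
        int ret = scsi_init_io(cmd);

        switch (ret) {
        case 0:                 /* sg table built, command ready */
                break;
        case BLKPREP_DEFER:     /* no sg memory right now, retry later */
                break;
        case BLKPREP_KILL:      /* bad request; command already released */
                break;
        }
        return ret;
}
#endif
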
static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state; don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones) */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if(unlikely(specials_only)) {
                        if(specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }


                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 * (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).   This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                drv = *(struct scsi_driver **)req->rq_disk->private_data;
                if (unlikely(!drv->init_command(cmd))) {
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        return BLKPREP_KILL;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, elv_next_request() returns NULL and the queue
         * must be restarted later, so plug it here if no command in
         * flight will come back and do that for us. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth)
                return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d (%d:%d) unblocking device at"
                                       " zero depth\n", sdev->host->host_no,
                                       sdev->id, sdev->lun));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (sdev->device_blocked)
                return 0;

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (test_bit(SHOST_RECOVERY, &shost->shost_state))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
        struct request *req;

        while ((req = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(req);
                req->flags |= REQ_QUIET;
                while (end_that_request_first(req, 0, req->nr_sectors))
                        ;
                end_that_request_last(req);
        }
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                scsi_kill_requests(q);
                return;
        }

        if(!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = elv_next_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        blkdev_dequeue_request(req);
                        req->flags |= REQ_QUIET;
                        while (end_that_request_first(req, 0, req->nr_sectors))
                                ;
                        end_that_request_last(req);
                        continue;
                }


                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blkdev_dequeue_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                spin_lock(shost->host_lock);

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
                if (sdev->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
                        scsi_target(sdev)->starget_sdev_user = sdev;
                }
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *              take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org",
                                         __FUNCTION__);
                        BUG();
                }

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if(rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if(sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if(sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual-DMA translation
         * hardware have no practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = *host_dev->dma_mask;

        return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

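/*
 * Illustrative note (not part of the driver): for a host with the
 * common 32-bit DMA mask the computed limit is 0xffffffff, so the
 * block layer bounces any page above 4GB; an unchecked_isa_dma host
 * is clamped to the 16MB ISA window (BLK_BOUNCE_ISA) instead.
 * Hypothetical name; compiled out.
 */
#if 0
static u64 example_bounce_limit(struct Scsi_Host *shost)
{
        u64 limit = scsi_calculate_bounce_limit(shost);

        /* e.g. BLK_BOUNCE_ISA for unchecked_isa_dma hosts,
         * 0xffffffff for a typical 32-bit DMA mask */
        return limit;
}
#endif
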
1418 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1419 {
1420         struct Scsi_Host *shost = sdev->host;
1421         struct request_queue *q;
1422
1423         q = blk_init_queue(scsi_request_fn, NULL);
1424         if (!q)
1425                 return NULL;
1426
1427         blk_queue_prep_rq(q, scsi_prep_fn);
1428
1429         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1430         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1431         blk_queue_max_sectors(q, shost->max_sectors);
1432         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1433         blk_queue_segment_boundary(q, shost->dma_boundary);
1434         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1435
1436         /*
1437          * prefer ordered tags to flush ordering: the device orders the commands itself
1438          */
1439         if (shost->ordered_tag)
1440                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1441         else if (shost->ordered_flush) {
1442                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1443                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1444                 q->end_flush_fn = scsi_end_flush_fn;
1445         }
1446
1447         if (!shost->use_clustering)
1448                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1449         return q;
1450 }
1451
1452 void scsi_free_queue(struct request_queue *q)
1453 {
1454         blk_cleanup_queue(q);
1455 }
1456
1457 /*
1458  * Function:    scsi_block_requests()
1459  *
1460  * Purpose:     Utility function used by low-level drivers to prevent further
1461  *              commands from being queued to the device.
1462  *
1463  * Arguments:   shost       - Host in question
1464  *
1465  * Returns:     Nothing
1466  *
1467  * Lock status: No locks are assumed held.
1468  *
1469  * Notes:       There is no timer nor any other means by which the requests
1470  *              get unblocked other than the low-level driver calling
1471  *              scsi_unblock_requests().
1472  */
1473 void scsi_block_requests(struct Scsi_Host *shost)
1474 {
1475         shost->host_self_blocked = 1;
1476 }
1477 EXPORT_SYMBOL(scsi_block_requests);
1478
1479 /*
1480  * Function:    scsi_unblock_requests()
1481  *
1482  * Purpose:     Utility function used by low-level drivers to allow further
1483  *              commands to be queued to the device.
1484  *
1485  * Arguments:   shost       - Host in question
1486  *
1487  * Returns:     Nothing
1488  *
1489  * Lock status: No locks are assumed held.
1490  *
1491  * Notes:       There is no timer nor any other means by which the requests
1492  *              get unblocked other than the low-level driver calling
1493  *              scsi_unblock_requests().
1494  *
1495  *              This is done as an API function so that changes to the
1496  *              internals of the scsi mid-layer won't require wholesale
1497  *              changes to drivers that use this feature.
1498  */
1499 void scsi_unblock_requests(struct Scsi_Host *shost)
1500 {
1501         shost->host_self_blocked = 0;
1502         scsi_run_host_queues(shost);
1503 }
1504 EXPORT_SYMBOL(scsi_unblock_requests);
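
/*
 * Illustrative sketch (compiled out): the intended pairing of the two
 * calls above, bracketing a period during which the LLD cannot accept
 * commands.  "my_hba_reset" is a hypothetical driver routine.
 */
#if 0
static void example_reset_hba(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);
        my_hba_reset(shost);            /* no new commands are queued here */
        scsi_unblock_requests(shost);   /* also reruns the device queues */
}
#endif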
1505
1506 int __init scsi_init_queue(void)
1507 {
1508         int i;
1509
1510         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1511                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1512                 int size = sgp->size * sizeof(struct scatterlist);
1513
1514                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1515                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1516                 if (!sgp->slab) {
1517                         printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name);
1518                         return -ENOMEM;
1519                 }
1520
1521                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1522                                 mempool_alloc_slab, mempool_free_slab,
1523                                 sgp->slab);
1524                 if (!sgp->pool) {
1525                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name);
1526                         return -ENOMEM;
1527                 }
1528         }
1529
1530         return 0;
1531 }
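
/*
 * Illustrative sketch (compiled out): how a segment count maps to one of
 * the pools initialised above; this mirrors, in simplified form, the
 * selection that scsi_alloc_sgtable() performs elsewhere in this file.
 */
#if 0
static struct scsi_host_sg_pool *example_sg_pool(int nsegs)
{
        int index = 0, size = 8;

        BUG_ON(nsegs > SCSI_MAX_PHYS_SEGMENTS);
        while (size < nsegs) {          /* pools come in 8, 16, 32, ... */
                size <<= 1;
                index++;
        }
        return &scsi_sg_pools[index];
}
#endif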
1532
1533 void scsi_exit_queue(void)
1534 {
1535         int i;
1536
1537         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1538                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1539                 mempool_destroy(sgp->pool);
1540                 kmem_cache_destroy(sgp->slab);
1541         }
1542 }
1543 /**
1544  *      __scsi_mode_sense - issue a mode sense, falling back from ten
1545  *              to six byte commands if necessary.
1546  *      @sreq:  SCSI request to fill in with the MODE_SENSE
1547  *      @dbd:   set if mode sense will disable block descriptors in the return
1548  *      @modepage: mode page being requested
1549  *      @buffer: request buffer (may not be smaller than eight bytes)
1550  *      @len:   length of request buffer.
1551  *      @timeout: command timeout
1552  *      @retries: number of retries before failing
1553  *      @data: returns a structure abstracting the mode header data
1554  *
1555  *      Returns the SCSI result of the command; zero indicates success.
1556  *      The header offset (4 for a six byte command, 8 for a ten byte
1557  *      command) is returned in @data->header_length.
1558  **/
1559 int
1560 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1561                   unsigned char *buffer, int len, int timeout, int retries,
1562                   struct scsi_mode_data *data) {
1563         unsigned char cmd[12];
1564         int use_10_for_ms;
1565         int header_length;
1566
1567         memset(data, 0, sizeof(*data));
1568         memset(&cmd[0], 0, 12);
1569         cmd[1] = dbd & 0x18;    /* pass through only the DBD and LLBAA bits */
1570         cmd[2] = modepage;
1571
1572  retry:
1573         use_10_for_ms = sreq->sr_device->use_10_for_ms;
1574
1575         if (use_10_for_ms) {
1576                 if (len < 8)
1577                         len = 8;
1578
1579                 cmd[0] = MODE_SENSE_10;
1580                 cmd[8] = len;
1581                 header_length = 8;
1582         } else {
1583                 if (len < 4)
1584                         len = 4;
1585
1586                 cmd[0] = MODE_SENSE;
1587                 cmd[4] = len;
1588                 header_length = 4;
1589         }
1590
1591         sreq->sr_cmd_len = 0;
1592         memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1593         sreq->sr_data_direction = DMA_FROM_DEVICE;
1594
1595         memset(buffer, 0, len);
1596
1597         scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1598
1599         /* This code looks awful: what it's doing is making sure an
1600          * ILLEGAL REQUEST sense return identifies the actual command
1601          * byte as the problem.  MODE_SENSE commands can return
1602          * ILLEGAL REQUEST if the code page isn't supported */
1603
1604         if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
1605             (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
1606                 struct scsi_sense_hdr sshdr;
1607
1608                 if (scsi_request_normalize_sense(sreq, &sshdr)) {
1609                         if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1610                             (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1611                                 /* 
1612                                  * Invalid command operation code
1613                                  */
1614                                 sreq->sr_device->use_10_for_ms = 0;
1615                                 goto retry;
1616                         }
1617                 }
1618         }
1619
1620         if (scsi_status_is_good(sreq->sr_result)) {
1621                 data->header_length = header_length;
1622                 if (use_10_for_ms) {
1623                         data->length = buffer[0]*256 + buffer[1] + 2;
1624                         data->medium_type = buffer[2];
1625                         data->device_specific = buffer[3];
1626                         data->longlba = buffer[4] & 0x01;
1627                         data->block_descriptor_length = buffer[6]*256
1628                                 + buffer[7];
1629                 } else {
1630                         data->length = buffer[0] + 1;
1631                         data->medium_type = buffer[1];
1632                         data->device_specific = buffer[2];
1633                         data->block_descriptor_length = buffer[3];
1634                 }
1635         }
1636
1637         return sreq->sr_result;
1638 }
1639 EXPORT_SYMBOL(__scsi_mode_sense);
1640
1641 /**
1642  *      scsi_mode_sense - issue a mode sense, falling back from ten
1643  *              to six byte commands if necessary.
1644  *      @sdev:  scsi device to send command to.
1645  *      @dbd:   set if mode sense will disable block descriptors in the return
1646  *      @modepage: mode page being requested
1647  *      @buffer: request buffer (may not be smaller than eight bytes)
1648  *      @len:   length of request buffer.
1649  *      @timeout: command timeout
1650  *      @retries: number of retries before failing
1651  *      @data: returns a structure abstracting the mode header data
1652  *
1653  *      Returns the SCSI result (zero on success) or a negative error
1654  *      code; the header offset is returned in @data->header_length.
1655  **/
1656 int
1657 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1658                 unsigned char *buffer, int len, int timeout, int retries,
1659                 struct scsi_mode_data *data)
1660 {
1661         struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1662         int ret;
1663
1664         if (!sreq)
1665                 return -1;
1666
1667         ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1668                                 timeout, retries, data);
1669
1670         scsi_release_request(sreq);
1671
1672         return ret;
1673 }
1674 EXPORT_SYMBOL(scsi_mode_sense);
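
/*
 * Illustrative sketch (compiled out): fetching the caching mode page
 * (0x08) and locating the page data past the header and any block
 * descriptors.  The buffer size, timeout and retry count are arbitrary.
 */
#if 0
static void example_read_cache_page(struct scsi_device *sdev)
{
        unsigned char buffer[128];
        struct scsi_mode_data data;
        int res;

        res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
                              30 * HZ, 3, &data);
        if (scsi_status_is_good(res)) {
                unsigned char *page = buffer + data.header_length +
                                      data.block_descriptor_length;
                /* WCE is bit 2 of byte 2 of the caching page */
                printk(KERN_DEBUG "WCE=%d\n", (page[2] & 0x04) ? 1 : 0);
        }
}
#endif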
1675
1676 int
1677 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1678 {
1679         struct scsi_request *sreq;
1680         char cmd[] = {
1681                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1682         };
1683         int result;
1684         
1685         sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1686         if (!sreq)
1687                 return -ENOMEM;
1688
1689         sreq->sr_data_direction = DMA_NONE;
1690         scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1691
1692         if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
1693                 struct scsi_sense_hdr sshdr;
1694
1695                 if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
1696                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1697                      (sshdr.sense_key == NOT_READY))) {
1698                         sdev->changed = 1;
1699                         sreq->sr_result = 0;
1700                 }
1701         }
1702         result = sreq->sr_result;
1703         scsi_release_request(sreq);
1704         return result;
1705 }
1706 EXPORT_SYMBOL(scsi_test_unit_ready);
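
/*
 * Illustrative sketch (compiled out): polling for media presence with
 * the helper above.  The timeout and retry values are arbitrary.
 */
#if 0
static int example_media_present(struct scsi_device *sdev)
{
        return scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0;
}
#endif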
1707
1708 /**
1709  *      scsi_device_set_state - Take the given device through the device
1710  *              state model.
1711  *      @sdev:  scsi device to change the state of.
1712  *      @state: state to change to.
1713  *
1714  *      Returns zero if successful or -EINVAL if the requested
1715  *      transition is illegal.
1716  **/
1717 int
1718 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1719 {
1720         enum scsi_device_state oldstate = sdev->sdev_state;
1721
1722         if (state == oldstate)
1723                 return 0;
1724
1725         switch (state) {
1726         case SDEV_CREATED:
1727                 /* There are no legal states that come back to
1728                  * created.  This is the manually initialised start
1729                  * state */
1730                 goto illegal;
1731                         
1732         case SDEV_RUNNING:
1733                 switch (oldstate) {
1734                 case SDEV_CREATED:
1735                 case SDEV_OFFLINE:
1736                 case SDEV_QUIESCE:
1737                 case SDEV_BLOCK:
1738                         break;
1739                 default:
1740                         goto illegal;
1741                 }
1742                 break;
1743
1744         case SDEV_QUIESCE:
1745                 switch (oldstate) {
1746                 case SDEV_RUNNING:
1747                 case SDEV_OFFLINE:
1748                         break;
1749                 default:
1750                         goto illegal;
1751                 }
1752                 break;
1753
1754         case SDEV_OFFLINE:
1755                 switch (oldstate) {
1756                 case SDEV_CREATED:
1757                 case SDEV_RUNNING:
1758                 case SDEV_QUIESCE:
1759                 case SDEV_BLOCK:
1760                         break;
1761                 default:
1762                         goto illegal;
1763                 }
1764                 break;
1765
1766         case SDEV_BLOCK:
1767                 switch (oldstate) {
1768                 case SDEV_CREATED:
1769                 case SDEV_RUNNING:
1770                         break;
1771                 default:
1772                         goto illegal;
1773                 }
1774                 break;
1775
1776         case SDEV_CANCEL:
1777                 switch (oldstate) {
1778                 case SDEV_CREATED:
1779                 case SDEV_RUNNING:
1780                 case SDEV_OFFLINE:
1781                 case SDEV_BLOCK:
1782                         break;
1783                 default:
1784                         goto illegal;
1785                 }
1786                 break;
1787
1788         case SDEV_DEL:
1789                 switch (oldstate) {
1790                 case SDEV_CANCEL:
1791                         break;
1792                 default:
1793                         goto illegal;
1794                 }
1795                 break;
1796
1797         }
1798         sdev->sdev_state = state;
1799         return 0;
1800
1801  illegal:
1802         SCSI_LOG_ERROR_RECOVERY(1, 
1803                                 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1804                                            "Illegal state transition %s->%s\n",
1805                                            scsi_device_state_name(oldstate),
1806                                            scsi_device_state_name(state))
1807                                 );
1808         return -EINVAL;
1809 }
1810 EXPORT_SYMBOL(scsi_device_set_state);
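
/*
 * Summary of the legal transitions implemented above, derived directly
 * from the switch statement (new state: permitted old states):
 *
 *      SDEV_CREATED: none (manually initialised start state)
 *      SDEV_RUNNING: CREATED, OFFLINE, QUIESCE, BLOCK
 *      SDEV_QUIESCE: RUNNING, OFFLINE
 *      SDEV_OFFLINE: CREATED, RUNNING, QUIESCE, BLOCK
 *      SDEV_BLOCK:   CREATED, RUNNING
 *      SDEV_CANCEL:  CREATED, RUNNING, OFFLINE, BLOCK
 *      SDEV_DEL:     CANCEL
 */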
1811
1812 /**
1813  *      scsi_device_quiesce - Block user issued commands.
1814  *      @sdev:  scsi device to quiesce.
1815  *
1816  *      This works by trying to transition to the SDEV_QUIESCE state
1817  *      (which must be a legal transition).  When the device is in this
1818  *      state, only special requests will be accepted; all others will
1819  *      be deferred.  Since special requests may also be requeued requests,
1820  *      a successful return doesn't guarantee the device will be 
1821  *      totally quiescent.
1822  *
1823  *      Must be called with user context, may sleep.
1824  *
1825  *      Returns zero if successful or an error if the quiesce transition is illegal.
1826  **/
1827 int
1828 scsi_device_quiesce(struct scsi_device *sdev)
1829 {
1830         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1831         if (err)
1832                 return err;
1833
1834         scsi_run_queue(sdev->request_queue);
1835         while (sdev->device_busy) {
1836                 msleep_interruptible(200);
1837                 scsi_run_queue(sdev->request_queue);
1838         }
1839         return 0;
1840 }
1841 EXPORT_SYMBOL(scsi_device_quiesce);
1842
1843 /**
1844  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1845  *      @sdev:  scsi device to resume.
1846  *
1847  *      Moves the device from quiesced back to running and restarts the
1848  *      queues.
1849  *
1850  *      Must be called with user context, may sleep.
1851  **/
1852 void
1853 scsi_device_resume(struct scsi_device *sdev)
1854 {
1855         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1856                 return;
1857         scsi_run_queue(sdev->request_queue);
1858 }
1859 EXPORT_SYMBOL(scsi_device_resume);
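
/*
 * Illustrative sketch (compiled out): the intended quiesce/resume
 * pairing around a sequence of special requests; the body in between is
 * hypothetical.
 */
#if 0
static int example_quiesced_sequence(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);

        if (err)
                return err;
        /* ... issue special requests; ordinary I/O stays deferred ... */
        scsi_device_resume(sdev);
        return 0;
}
#endif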
1860
1861 static void
1862 device_quiesce_fn(struct scsi_device *sdev, void *data)
1863 {
1864         scsi_device_quiesce(sdev);
1865 }
1866
1867 void
1868 scsi_target_quiesce(struct scsi_target *starget)
1869 {
1870         starget_for_each_device(starget, NULL, device_quiesce_fn);
1871 }
1872 EXPORT_SYMBOL(scsi_target_quiesce);
1873
1874 static void
1875 device_resume_fn(struct scsi_device *sdev, void *data)
1876 {
1877         scsi_device_resume(sdev);
1878 }
1879
1880 void
1881 scsi_target_resume(struct scsi_target *starget)
1882 {
1883         starget_for_each_device(starget, NULL, device_resume_fn);
1884 }
1885 EXPORT_SYMBOL(scsi_target_resume);
1886
1887 /**
1888  * scsi_internal_device_block - internal function to put a device
1889  *                              temporarily into the SDEV_BLOCK state
1890  * @sdev:       device to block
1891  *
1892  * Block request made by scsi lld's to temporarily stop all
1893  * scsi commands on the specified device.  Called from interrupt
1894  * or normal process context.
1895  *
1896  * Returns zero if successful or error if not
1897  *
1898  * Notes:       
1899  *      This routine transitions the device to the SDEV_BLOCK state
1900  *      (which must be a legal transition).  When the device is in this
1901  *      state, all commands are deferred until the scsi lld reenables
1902  *      the device with scsi_internal_device_unblock or device_block_tmo fires.
1903  *      This routine assumes the host_lock is held on entry.
1904  **/
1905 int
1906 scsi_internal_device_block(struct scsi_device *sdev)
1907 {
1908         request_queue_t *q = sdev->request_queue;
1909         unsigned long flags;
1910         int err = 0;
1911
1912         err = scsi_device_set_state(sdev, SDEV_BLOCK);
1913         if (err)
1914                 return err;
1915
1916         /* 
1917          * The device has transitioned to SDEV_BLOCK.  Stop the
1918          * block layer from calling the midlayer with this device's
1919          * request queue. 
1920          */
1921         spin_lock_irqsave(q->queue_lock, flags);
1922         blk_stop_queue(q);
1923         spin_unlock_irqrestore(q->queue_lock, flags);
1924
1925         return 0;
1926 }
1927 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
1928  
1929 /**
1930  * scsi_internal_device_unblock - resume a device after a block request
1931  * @sdev:       device to resume
1932  *
1933  * Called by scsi lld's or the midlayer to restart the device queue
1934  * for the previously suspended scsi device.  Called from interrupt or
1935  * normal process context.
1936  *
1937  * Returns zero if successful or error if not.
1938  *
1939  * Notes:       
1940  *      This routine transitions the device to the SDEV_RUNNING state
1941  *      (which must be a legal transition) allowing the midlayer to
1942  *      goose the queue for this device.  This routine assumes the 
1943  *      host_lock is held upon entry.
1944  **/
1945 int
1946 scsi_internal_device_unblock(struct scsi_device *sdev)
1947 {
1948         request_queue_t *q = sdev->request_queue; 
1949         int err;
1950         unsigned long flags;
1951         
1952         /* 
1953          * Try to transition the scsi device to SDEV_RUNNING
1954          * and goose the device queue if successful.  
1955          */
1956         err = scsi_device_set_state(sdev, SDEV_RUNNING);
1957         if (err)
1958                 return err;
1959
1960         spin_lock_irqsave(q->queue_lock, flags);
1961         blk_start_queue(q);
1962         spin_unlock_irqrestore(q->queue_lock, flags);
1963
1964         return 0;
1965 }
1966 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
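
/*
 * Illustrative sketch (compiled out): an LLD blocking a device while its
 * port is temporarily gone and unblocking it on recovery.  Per the notes
 * above, the host_lock is held around each call.
 */
#if 0
static void example_port_event(struct scsi_device *sdev, int gone)
{
        unsigned long flags;

        spin_lock_irqsave(sdev->host->host_lock, flags);
        if (gone)
                scsi_internal_device_block(sdev);
        else
                scsi_internal_device_unblock(sdev);
        spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
#endif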
1967
1968 static void
1969 device_block(struct scsi_device *sdev, void *data)
1970 {
1971         scsi_internal_device_block(sdev);
1972 }
1973
1974 static int
1975 target_block(struct device *dev, void *data)
1976 {
1977         if (scsi_is_target_device(dev))
1978                 starget_for_each_device(to_scsi_target(dev), NULL,
1979                                         device_block);
1980         return 0;
1981 }
1982
1983 void
1984 scsi_target_block(struct device *dev)
1985 {
1986         if (scsi_is_target_device(dev))
1987                 starget_for_each_device(to_scsi_target(dev), NULL,
1988                                         device_block);
1989         else
1990                 device_for_each_child(dev, NULL, target_block);
1991 }
1992 EXPORT_SYMBOL_GPL(scsi_target_block);
1993
1994 static void
1995 device_unblock(struct scsi_device *sdev, void *data)
1996 {
1997         scsi_internal_device_unblock(sdev);
1998 }
1999
2000 static int
2001 target_unblock(struct device *dev, void *data)
2002 {
2003         if (scsi_is_target_device(dev))
2004                 starget_for_each_device(to_scsi_target(dev), NULL,
2005                                         device_unblock);
2006         return 0;
2007 }
2008
2009 void
2010 scsi_target_unblock(struct device *dev)
2011 {
2012         if (scsi_is_target_device(dev))
2013                 starget_for_each_device(to_scsi_target(dev), NULL,
2014                                         device_unblock);
2015         else
2016                 device_for_each_child(dev, NULL, target_unblock);
2017 }
2018 EXPORT_SYMBOL_GPL(scsi_target_unblock);
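
/*
 * Illustrative sketch (compiled out): a transport class suspending all
 * devices under a remote port and resuming them when it returns.
 * "rport_dev" stands for a hypothetical device embedded in the
 * transport's remote port object.
 */
#if 0
static void example_remote_port_loss(struct device *rport_dev)
{
        scsi_target_block(rport_dev);
        /* ... wait for the port to return or for a loss timer ... */
        scsi_target_unblock(rport_dev);
}
#endif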