diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d1aa95d..2ab7df0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/completion.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/unistd.h>
 #include <linux/spinlock.h>
 #include <linux/kmod.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/mutex.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
-#include <scsi/scsi_request.h>
 
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
 static void scsi_done(struct scsi_cmnd *cmd);
-static int scsi_retry_command(struct scsi_cmnd *cmd);
 
 /*
  * Definitions and constants.
@@ -117,86 +115,12 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
 };
 EXPORT_SYMBOL(scsi_device_types);
 
-/*
- * Function:    scsi_allocate_request
- *
- * Purpose:     Allocate a request descriptor.
- *
- * Arguments:   device         - device for which we want a request
- *             gfp_mask        - allocation flags passed to kmalloc
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Pointer to request block.
- */
-struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
-                                          int gfp_mask)
-{
-       const int offset = ALIGN(sizeof(struct scsi_request), 4);
-       const int size = offset + sizeof(struct request);
-       struct scsi_request *sreq;
-  
-       sreq = kmalloc(size, gfp_mask);
-       if (likely(sreq != NULL)) {
-               memset(sreq, 0, size);
-               sreq->sr_request = (struct request *)(((char *)sreq) + offset);
-               sreq->sr_device = sdev;
-               sreq->sr_host = sdev->host;
-               sreq->sr_magic = SCSI_REQ_MAGIC;
-               sreq->sr_data_direction = DMA_BIDIRECTIONAL;
-       }
-
-       return sreq;
-}
-EXPORT_SYMBOL(scsi_allocate_request);
-
-void __scsi_release_request(struct scsi_request *sreq)
-{
-       struct request *req = sreq->sr_request;
-
-       /* unlikely because the tag was usually ended earlier by the
-        * mid-layer. However, for layering reasons ULD's don't end
-        * the tag of commands they generate. */
-       if (unlikely(blk_rq_tagged(req))) {
-               unsigned long flags;
-               struct request_queue *q = req->q;
-
-               spin_lock_irqsave(q->queue_lock, flags);
-               blk_queue_end_tag(q, req);
-               spin_unlock_irqrestore(q->queue_lock, flags);
-       }
-
-
-       if (likely(sreq->sr_command != NULL)) {
-               struct scsi_cmnd *cmd = sreq->sr_command;
-
-               sreq->sr_command = NULL;
-               scsi_next_command(cmd);
-       }
-}
-
-/*
- * Function:    scsi_release_request
- *
- * Purpose:     Release a request descriptor.
- *
- * Arguments:   sreq    - request to release
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- */
-void scsi_release_request(struct scsi_request *sreq)
-{
-       __scsi_release_request(sreq);
-       kfree(sreq);
-}
-EXPORT_SYMBOL(scsi_release_request);
-
 struct scsi_host_cmd_pool {
        kmem_cache_t    *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
-       unsigned int    gfp_mask;
+       gfp_t           gfp_mask;
 };
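
The allocation-flag fields and parameters in this file move from plain int to gfp_t, the sparse-annotated (__bitwise) typedef for GFP flags, so static checkers can catch ordinary integers being passed where allocation flags belong. A minimal sketch of the pattern; the helper name is illustrative and not part of the patch:

    /* A pool-level mask such as __GFP_DMA (see scsi_cmd_dma_pool below) can
     * be OR'ed into the caller's flags at allocation time. */
    static struct scsi_cmnd *cmd_alloc_sketch(struct scsi_host_cmd_pool *pool,
                                              gfp_t gfp_mask)
    {
            return kmem_cache_alloc(pool->slab, gfp_mask | pool->gfp_mask);
    }
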
 
 static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -210,10 +134,10 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
        .gfp_mask       = __GFP_DMA,
 };
 
-static DECLARE_MUTEX(host_cmd_pool_mutex);
+static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-                                           int gfp_mask)
+                                           gfp_t gfp_mask)
 {
        struct scsi_cmnd *cmd;
 
@@ -245,7 +169,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
  *
  * Returns:    The allocated scsi command structure.
  */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
+struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 {
        struct scsi_cmnd *cmd;
 
@@ -265,6 +189,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
                spin_lock_irqsave(&dev->list_lock, flags);
                list_add_tail(&cmd->list, &dev->cmd_list);
                spin_unlock_irqrestore(&dev->list_lock, flags);
+               cmd->jiffies_at_alloc = jiffies;
        } else
                put_device(&dev->sdev_gendev);
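
The new jiffies_at_alloc stamp gives completion and retry code a fixed reference for how long a command has been outstanding in total, across retries. A hedged sketch of the kind of check this enables; the bound and the helper are illustrative, not taken from this hunk:

    /* Fragment: give up once the command has consumed more than
     * (allowed + 1) full timeouts since it was allocated. */
    unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;

    if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
            give_up_on_command(cmd);        /* hypothetical helper */
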
 
@@ -330,7 +255,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
         * Select a command slab for this host and create it if not
 * yet existent.
         */
-       down(&host_cmd_pool_mutex);
+       mutex_lock(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
@@ -342,7 +267,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 
        pool->users++;
        shost->cmd_pool = pool;
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
 
        /*
         * Get one backup command for this host.
@@ -359,7 +284,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
                kmem_cache_destroy(pool->slab);
        return -ENOMEM;
  fail:
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
        return -ENOMEM;
 
 }
@@ -381,10 +306,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }
 
-       down(&host_cmd_pool_mutex);
+       mutex_lock(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
-       up(&host_cmd_pool_mutex);
+       mutex_unlock(&host_cmd_pool_mutex);
 }
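
The host_cmd_pool locking above is a straight semaphore-to-mutex conversion: DECLARE_MUTEX with down()/up() becomes DEFINE_MUTEX with mutex_lock()/mutex_unlock(), the dedicated sleeping-lock primitive pulled in by the new <linux/mutex.h> include. The pattern in isolation, with an illustrative lock name:

    static DEFINE_MUTEX(example_lock);      /* was: DECLARE_MUTEX(example_lock) */

    static void touch_shared_state(void)
    {
            mutex_lock(&example_lock);      /* was: down(&example_lock) */
            /* ... update whatever the lock protects ... */
            mutex_unlock(&example_lock);    /* was: up(&example_lock) */
    }
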
 
 #ifdef CONFIG_SCSI_LOGGING
@@ -409,9 +334,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        sdev = cmd->device;
-                       printk(KERN_INFO "scsi <%d:%d:%d:%d> send ",
-                              sdev->host->host_no, sdev->channel, sdev->id,
-                              sdev->lun);
+                       sdev_printk(KERN_INFO, sdev, "send ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
@@ -455,9 +378,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        sdev = cmd->device;
-                       printk(KERN_INFO "scsi <%d:%d:%d:%d> done ",
-                              sdev->host->host_no, sdev->channel, sdev->id,
-                              sdev->lun);
+                       sdev_printk(KERN_INFO, sdev, "done ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
@@ -570,7 +491,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        /* 
         * If SCSI-2 or lower, store the LUN value in cmnd.
         */
-       if (cmd->device->scsi_level <= SCSI_2) {
+       if (cmd->device->scsi_level <= SCSI_2 &&
+           cmd->device->scsi_level != SCSI_UNKNOWN) {
                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
                               (cmd->device->lun << 5 & 0xe0);
        }
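
For SCSI-2 and older targets the LUN is also encoded in bits 5-7 of CDB byte 1; the added SCSI_UNKNOWN test keeps that byte untouched for devices whose SCSI level has not been probed yet. The bit manipulation on its own, with an illustrative helper name:

    /* Fold a LUN into byte 1 of a CDB, SCSI-2 style: bits 4..0 keep their
     * command-specific meaning, bits 7..5 carry the LUN. */
    static void set_lun_in_cdb(unsigned char *cmnd, unsigned int lun)
    {
            cmnd[1] = (cmnd[1] & 0x1f) | ((lun << 5) & 0xe0);
    }

    /* e.g. lun == 2 sets bits 7..5 of cmnd[1] to 0x40 and leaves the rest alone */
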
@@ -650,73 +572,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        return rtn;
 }
 
-/*
- * Function:    scsi_init_cmd_from_req
- *
- * Purpose:     Queue a SCSI command
- * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
- *
- * Arguments:   cmd       - command descriptor.
- *              sreq      - Request from the queue.
- *
- * Lock status: None needed.
- *
- * Returns:     Nothing.
- *
- * Notes:       Mainly transfer data from the request structure to the
- *              command structure.  The request structure is allocated
- *              using the normal memory allocator, and requests can pile
- *              up to more or less any depth.  The command structure represents
- *              a consumable resource, as these are allocated into a pool
- *              when the SCSI subsystem initializes.  The preallocation is
- *              required so that in low-memory situations a disk I/O request
- *              won't cause the memory manager to try and write out a page.
- *              The request structure is generally used by ioctls and character
- *              devices.
- */
-void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
-{
-       sreq->sr_command = cmd;
-
-       cmd->cmd_len = sreq->sr_cmd_len;
-       cmd->use_sg = sreq->sr_use_sg;
-
-       cmd->request = sreq->sr_request;
-       memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
-       cmd->serial_number = 0;
-       cmd->bufflen = sreq->sr_bufflen;
-       cmd->buffer = sreq->sr_buffer;
-       cmd->retries = 0;
-       cmd->allowed = sreq->sr_allowed;
-       cmd->done = sreq->sr_done;
-       cmd->timeout_per_command = sreq->sr_timeout_per_command;
-       cmd->sc_data_direction = sreq->sr_data_direction;
-       cmd->sglist_len = sreq->sr_sglist_len;
-       cmd->underflow = sreq->sr_underflow;
-       cmd->sc_request = sreq;
-       memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));
-
-       /*
-        * Zero the sense buffer.  Some host adapters automatically request
-        * sense on error.  0 is not a valid sense code.
-        */
-       memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
-       cmd->request_buffer = sreq->sr_buffer;
-       cmd->request_bufflen = sreq->sr_bufflen;
-       cmd->old_use_sg = cmd->use_sg;
-       if (cmd->cmd_len == 0)
-               cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
-       cmd->old_cmd_len = cmd->cmd_len;
-       cmd->sc_old_data_direction = cmd->sc_data_direction;
-       cmd->old_underflow = cmd->underflow;
-
-       /*
-        * Start the timer ticking.
-        */
-       cmd->result = 0;
-
-       SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
-}
 
 /*
  * Per-CPU I/O completion queue.
@@ -724,6 +579,24 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
 static DEFINE_PER_CPU(struct list_head, scsi_done_q);
 
 /**
+ * scsi_req_abort_cmd - Request command recovery for the specified command
+ * @cmd: pointer to the SCSI command of interest
+ *
+ * This function requests that SCSI Core start recovery for the
+ * command by deleting the timer and adding the command to the eh
+ * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
+ * implement their own error recovery MAY ignore the timeout event if
+ * they generated scsi_req_abort_cmd.
+ */
+void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
+{
+       if (!scsi_delete_timer(cmd))
+               return;
+       scsi_times_out(cmd);
+}
+EXPORT_SYMBOL(scsi_req_abort_cmd);
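
From the LLDD side, usage could look like the hypothetical fragment below (not part of this patch): when the HBA itself notices that a command has gone bad, the driver can hand it to the error handler immediately instead of waiting for the midlayer timer to expire.

    static void example_hba_detected_failure(struct scsi_cmnd *cmd)
    {
            /* Deletes the command timer and queues cmd for error handling;
             * if the timer has already fired this is a no-op. */
            scsi_req_abort_cmd(cmd);
    }
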
+
+/**
  * scsi_done - Enqueue the finished SCSI command into the done queue.
  * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -755,7 +628,7 @@ static void scsi_done(struct scsi_cmnd *cmd)
  * isn't running --- used by scsi_times_out */
 void __scsi_done(struct scsi_cmnd *cmd)
 {
-       unsigned long flags;
+       struct request *rq = cmd->request;
 
        /*
         * Set the serial numbers back to zero
@@ -766,57 +639,14 @@ void __scsi_done(struct scsi_cmnd *cmd)
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);
 
+       BUG_ON(!rq);
+
        /*
-        * Next, enqueue the command into the done queue.
-        * It is a per-CPU queue, so we just disable local interrupts
-        * and need no spinlock.
+        * The uptodate/nbytes values don't matter, as we allow partial
+        * completes and thus will check this in the softirq callback
         */
-       local_irq_save(flags);
-       list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
-       raise_softirq_irqoff(SCSI_SOFTIRQ);
-       local_irq_restore(flags);
-}
-
-/**
- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
- *
- * This is the consumer of the done queue.
- *
- * This is called with all interrupts enabled.  This should reduce
- * interrupt latency, stack depth, and reentrancy of the low-level
- * drivers.
- */
-static void scsi_softirq(struct softirq_action *h)
-{
-       int disposition;
-       LIST_HEAD(local_q);
-
-       local_irq_disable();
-       list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
-       local_irq_enable();
-
-       while (!list_empty(&local_q)) {
-               struct scsi_cmnd *cmd = list_entry(local_q.next,
-                                                  struct scsi_cmnd, eh_entry);
-               list_del_init(&cmd->eh_entry);
-
-               disposition = scsi_decide_disposition(cmd);
-               scsi_log_completion(cmd, disposition);
-               switch (disposition) {
-               case SUCCESS:
-                       scsi_finish_command(cmd);
-                       break;
-               case NEEDS_RETRY:
-                       scsi_retry_command(cmd);
-                       break;
-               case ADD_TO_MLQUEUE:
-                       scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
-                       break;
-               default:
-                       if (!scsi_eh_scmd_add(cmd, 0))
-                               scsi_finish_command(cmd);
-               }
-       }
+       rq->completion_data = cmd;
+       blk_complete_request(rq);
 }
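
Completion now rides the block layer's generic completion softirq instead of the private SCSI_SOFTIRQ removed just above: the scsi_cmnd is stashed in rq->completion_data and blk_complete_request() defers the rest to the queue's softirq-done callback, presumably registered with blk_queue_softirq_done() by the companion scsi_lib.c change. A condensed sketch of such a callback, reusing the disposition switch that used to live in scsi_softirq(); the function name here is illustrative:

    static void example_softirq_done(struct request *rq)
    {
            struct scsi_cmnd *cmd = rq->completion_data;
            int disposition;

            disposition = scsi_decide_disposition(cmd);
            scsi_log_completion(cmd, disposition);

            switch (disposition) {
            case SUCCESS:
                    scsi_finish_command(cmd);
                    break;
            case NEEDS_RETRY:
                    scsi_retry_command(cmd);
                    break;
            case ADD_TO_MLQUEUE:
                    scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                    break;
            default:
                    if (!scsi_eh_scmd_add(cmd, 0))
                            scsi_finish_command(cmd);
            }
    }
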
 
 /*
@@ -829,7 +659,7 @@ static void scsi_softirq(struct softirq_action *h)
  *              level drivers should not become re-entrant as a result of
  *              this.
  */
-static int scsi_retry_command(struct scsi_cmnd *cmd)
+int scsi_retry_command(struct scsi_cmnd *cmd)
 {
        /*
         * Restore the SCSI command state.
@@ -856,7 +686,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
-       struct scsi_request *sreq;
 
        scsi_device_unbusy(sdev);
 
@@ -878,28 +707,14 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);
 
-       SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion "
-                               "for device %d %x\n", sdev->id, cmd->result));
+       SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
+                               "Notifying upper driver of completion "
+                               "(result %x)\n", cmd->result));
 
        /*
         * We can get here with use_sg=0, causing a panic in the upper level
         */
        cmd->use_sg = cmd->old_use_sg;
-
-       /*
-        * If there is an associated request structure, copy the data over
-        * before we call the completion function.
-        */
-       sreq = cmd->sc_request;
-       if (sreq) {
-              sreq->sr_result = sreq->sr_command->result;
-              if (sreq->sr_result) {
-                      memcpy(sreq->sr_sense_buffer,
-                             sreq->sr_command->sense_buffer,
-                             sizeof(sreq->sr_sense_buffer));
-              }
-       }
-
        cmd->done(cmd);
 }
 EXPORT_SYMBOL(scsi_finish_command);
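
This hunk, like the scsi_log_send/scsi_log_completion hunks earlier and scsi_adjust_queue_depth below, converts hand-rolled "scsi <host:channel:id:lun>" prefixes to sdev_printk(), which derives the device identification from the scsi_device itself. In isolation:

    /* Emits the message with the device's <host:channel:id:lun> prefix
     * generated from sdev, instead of open-coding host_no/channel/id/lun. */
    sdev_printk(KERN_INFO, sdev,
                "Notifying upper driver of completion (result %x)\n",
                cmd->result);
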
@@ -955,10 +770,9 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
                        sdev->simple_tags = 1;
                        break;
                default:
-                       printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
-                               "scsi_adjust_queue_depth, bad queue type, "
-                               "disabled\n", sdev->host->host_no,
-                               sdev->channel, sdev->id, sdev->lun); 
+                       sdev_printk(KERN_WARNING, sdev,
+                                   "scsi_adjust_queue_depth, bad queue type, "
+                                   "disabled\n");
                case 0:
                        sdev->ordered_tags = sdev->simple_tags = 0;
                        sdev->queue_depth = tags;
@@ -1250,9 +1064,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
                list_for_each_safe(lh, lh_sf, &active_list) {
                        scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
                        list_del_init(lh);
-                       if (recovery) {
-                               scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
-                       } else {
+                       if (recovery &&
+                           !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
                                scmd->result = (DID_ABORT << 16);
                                scsi_finish_command(scmd);
                        }
@@ -1263,38 +1076,6 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
 }
 EXPORT_SYMBOL(scsi_device_cancel);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int scsi_cpu_notify(struct notifier_block *self,
-                          unsigned long action, void *hcpu)
-{
-       int cpu = (unsigned long)hcpu;
-
-       switch(action) {
-       case CPU_DEAD:
-               /* Drain scsi_done_q. */
-               local_irq_disable();
-               list_splice_init(&per_cpu(scsi_done_q, cpu),
-                                &__get_cpu_var(scsi_done_q));
-               raise_softirq_irqoff(SCSI_SOFTIRQ);
-               local_irq_enable();
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata scsi_cpu_nb = {
-       .notifier_call  = scsi_cpu_notify,
-};
-
-#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)
-#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
-#else
-#define register_scsi_cpu()
-#define unregister_scsi_cpu()
-#endif /* CONFIG_HOTPLUG_CPU */
-
 MODULE_DESCRIPTION("SCSI core");
 MODULE_LICENSE("GPL");
 
@@ -1324,12 +1105,9 @@ static int __init init_scsi(void)
        if (error)
                goto cleanup_sysctl;
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
-       devfs_mk_dir("scsi");
-       open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
-       register_scsi_cpu();
        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;
 
@@ -1354,10 +1132,8 @@ static void __exit exit_scsi(void)
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
-       devfs_remove("scsi");
        scsi_exit_procfs();
        scsi_exit_queue();
-       unregister_scsi_cpu();
 }
 
 subsys_initcall(init_scsi);