#define MIN_RESET_PERIOD (15*HZ)
/*
- * Macro to determine the size of SCSI command. This macro takes vendor
- * unique commands into account. SCSI commands in groups 6 and 7 are
- * vendor unique and we will depend upon the command length being
- * supplied correctly in cmd_len.
- */
-#define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
- COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
-
-/*
* Note - the initial logging level can be set here to log events at boot time.
* After the system is up, you may enable logging via the /proc interface.
*/
EXPORT_SYMBOL(scsi_device_type);
struct scsi_host_cmd_pool {
- struct kmem_cache *slab;
- unsigned int users;
- char *name;
- unsigned int slab_flags;
- gfp_t gfp_mask;
+ struct kmem_cache *cmd_slab;
+ struct kmem_cache *sense_slab;
+ unsigned int users;
+ char *cmd_name;
+ char *sense_name;
+ unsigned int slab_flags;
+ gfp_t gfp_mask;
};
static struct scsi_host_cmd_pool scsi_cmd_pool = {
- .name = "scsi_cmd_cache",
+ .cmd_name = "scsi_cmd_cache",
+ .sense_name = "scsi_sense_cache",
.slab_flags = SLAB_HWCACHE_ALIGN,
};
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
- .name = "scsi_cmd_cache(DMA)",
+ .cmd_name = "scsi_cmd_cache(DMA)",
+ .sense_name = "scsi_sense_cache(DMA)",
.slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
.gfp_mask = __GFP_DMA,
};
static DEFINE_MUTEX(host_cmd_pool_mutex);
/**
+ * scsi_pool_alloc_command - internal function to get a fully allocated command
+ * @pool: slab pool to allocate the command from
+ * @gfp_mask: mask for the allocation
+ *
+ * Returns a fully allocated command (with the associated sense buffer)
+ * or NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = kmem_cache_alloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
+ if (!cmd)
+ return NULL;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+ gfp_mask | pool->gfp_mask);
+ if (!cmd->sense_buffer) {
+ kmem_cache_free(pool->cmd_slab, cmd);
+ return NULL;
+ }
+
+ return cmd;
+}
+
+/**
+ * scsi_pool_free_command - internal function to release a command
+ * @pool: slab pool to release the command to
+ * @cmd: command to release
+ *
+ * The command must previously have been allocated by
+ * scsi_pool_alloc_command.
+ */
+static void
+scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
+ struct scsi_cmnd *cmd)
+{
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+
+ kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+ kmem_cache_free(pool->cmd_slab, cmd);
+}
+
+/**
+ * scsi_host_alloc_command - internal function to allocate command
+ * @shost: SCSI host whose pool to allocate from
+ * @gfp_mask: mask for the allocation
+ *
+ * Returns a fully allocated command with sense buffer and protection
+ * data buffer (where applicable) or NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ if (!cmd)
+ return NULL;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
+
+ if (!cmd->prot_sdb) {
+ scsi_pool_free_command(shost->cmd_pool, cmd);
+ return NULL;
+ }
+ }
+
+ return cmd;
+}
+
+/**
* __scsi_get_command - Allocate a struct scsi_cmnd
* @shost: host to transmit command
* @gfp_mask: allocation mask
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
+ unsigned char *buf;
- cmd = kmem_cache_alloc(shost->cmd_pool->slab,
- gfp_mask | shost->cmd_pool->gfp_mask);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
if (unlikely(!cmd)) {
unsigned long flags;
list_del_init(&cmd->list);
}
spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+ if (cmd) {
+ buf = cmd->sense_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sense_buffer = buf;
+ }
}
return cmd;
if (likely(cmd != NULL)) {
unsigned long flags;
- memset(cmd, 0, sizeof(*cmd));
cmd->device = dev;
init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
spin_unlock_irqrestore(&shost->free_list_lock, flags);
if (likely(cmd != NULL))
- kmem_cache_free(shost->cmd_pool->slab, cmd);
+ scsi_pool_free_command(shost->cmd_pool, cmd);
put_device(dev);
}
}
EXPORT_SYMBOL(scsi_put_command);
+static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
+{
+ struct scsi_host_cmd_pool *retval = NULL, *pool;
+ /*
+ * Select a command slab for this host and create it if it
+ * does not yet exist.
+ */
+ mutex_lock(&host_cmd_pool_mutex);
+ pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
+ &scsi_cmd_pool;
+ if (!pool->users) {
+ pool->cmd_slab = kmem_cache_create(pool->cmd_name,
+ sizeof(struct scsi_cmnd), 0,
+ pool->slab_flags, NULL);
+ if (!pool->cmd_slab)
+ goto fail;
+
+ pool->sense_slab = kmem_cache_create(pool->sense_name,
+ SCSI_SENSE_BUFFERSIZE, 0,
+ pool->slab_flags, NULL);
+ if (!pool->sense_slab) {
+ kmem_cache_destroy(pool->cmd_slab);
+ goto fail;
+ }
+ }
+
+ pool->users++;
+ retval = pool;
+ fail:
+ mutex_unlock(&host_cmd_pool_mutex);
+ return retval;
+}
+
+static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+{
+ struct scsi_host_cmd_pool *pool;
+
+ mutex_lock(&host_cmd_pool_mutex);
+ pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
+ &scsi_cmd_pool;
+ /*
+ * This may happen if a driver has a mismatched get and put
+ * of the command pool; the offending driver should be
+ * identifiable from the stack trace.
+ */
+ BUG_ON(pool->users == 0);
+
+ if (!--pool->users) {
+ kmem_cache_destroy(pool->cmd_slab);
+ kmem_cache_destroy(pool->sense_slab);
+ }
+ mutex_unlock(&host_cmd_pool_mutex);
+}
+
+/**
+ * scsi_allocate_command - get a fully allocated SCSI command
+ * @gfp_mask: allocation mask
+ *
+ * This function is for use outside of the normal host based pools.
+ * It allocates the relevant command and takes an additional reference
+ * on the pool it used. This function *must* be paired with
+ * scsi_free_command(), called with the identical mask; otherwise the
+ * pool reference counts will go wrong and eventually trigger a BUG.
+ *
+ * This function should *only* be used by drivers that need a static
+ * command allocation at initialization time for internal use.
+ */
+struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
+{
+ struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
+
+ if (!pool)
+ return NULL;
+
+ return scsi_pool_alloc_command(pool, gfp_mask);
+}
+EXPORT_SYMBOL(scsi_allocate_command);
+
+/**
+ * scsi_free_command - free a command allocated by scsi_allocate_command
+ * @gfp_mask: mask used in the original allocation
+ * @cmd: command to free
+ *
+ * Note: using the original allocation mask is vital because that's
+ * what determines which command pool we use to free the command. Any
+ * mismatch will cause the system to BUG eventually.
+ */
+void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
+{
+ struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
+
+ /*
+ * This could trigger if the mask passed to scsi_allocate_command
+ * doesn't match this mask. Otherwise we're guaranteed that this
+ * succeeds because scsi_allocate_command must have taken a reference
+ * on the pool.
+ */
+ BUG_ON(!pool);
+
+ scsi_pool_free_command(pool, cmd);
+ /*
+ * scsi_put_host_cmd_pool is called twice; once to release the
+ * reference we took above, and once to release the reference
+ * originally taken by scsi_allocate_command
+ */
+ scsi_put_host_cmd_pool(gfp_mask);
+ scsi_put_host_cmd_pool(gfp_mask);
+}
+EXPORT_SYMBOL(scsi_free_command);
+
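As a usage sketch (hypothetical driver code, not part of this patch):
a driver that reserves one command at module load must free it with
the identical mask, since the mask selects the pool whose reference
count was taken.

	/* Hypothetical example; assumes GFP_KERNEL is the right mask. */
	static struct scsi_cmnd *reserved_cmd;

	static int __init example_init(void)
	{
		reserved_cmd = scsi_allocate_command(GFP_KERNEL);
		if (!reserved_cmd)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* Must match the mask passed to scsi_allocate_command(). */
		scsi_free_command(GFP_KERNEL, reserved_cmd);
	}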
/**
* scsi_setup_command_freelist - Setup the command freelist for a scsi host.
* @shost: host to allocate the freelist for.
*/
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
- struct scsi_host_cmd_pool *pool;
struct scsi_cmnd *cmd;
+ const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
spin_lock_init(&shost->free_list_lock);
INIT_LIST_HEAD(&shost->free_list);
- /*
- * Select a command slab for this host and create it if not
- * yet existent.
- */
- mutex_lock(&host_cmd_pool_mutex);
- pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
- if (!pool->users) {
- pool->slab = kmem_cache_create(pool->name,
- sizeof(struct scsi_cmnd), 0,
- pool->slab_flags, NULL);
- if (!pool->slab)
- goto fail;
- }
+ shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
- pool->users++;
- shost->cmd_pool = pool;
- mutex_unlock(&host_cmd_pool_mutex);
+ if (!shost->cmd_pool)
+ return -ENOMEM;
/*
* Get one backup command for this host.
*/
- cmd = kmem_cache_alloc(shost->cmd_pool->slab,
- GFP_KERNEL | shost->cmd_pool->gfp_mask);
- if (!cmd)
- goto fail2;
- list_add(&cmd->list, &shost->free_list);
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
+ if (!cmd) {
+ scsi_put_host_cmd_pool(gfp_mask);
+ shost->cmd_pool = NULL;
+ return -ENOMEM;
+ }
+ list_add(&cmd->list, &shost->free_list);
return 0;
-
- fail2:
- if (!--pool->users)
- kmem_cache_destroy(pool->slab);
- return -ENOMEM;
- fail:
- mutex_unlock(&host_cmd_pool_mutex);
- return -ENOMEM;
-
}
/**
*/
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
+ /*
+ * If cmd_pool is NULL the free list was not initialized, so
+ * do not attempt to release resources.
+ */
+ if (!shost->cmd_pool)
+ return;
+
while (!list_empty(&shost->free_list)) {
struct scsi_cmnd *cmd;
cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
list_del_init(&cmd->list);
- kmem_cache_free(shost->cmd_pool->slab, cmd);
+ scsi_pool_free_command(shost->cmd_pool, cmd);
}
-
- mutex_lock(&host_cmd_pool_mutex);
- if (!--shost->cmd_pool->users)
- kmem_cache_destroy(shost->cmd_pool->slab);
- mutex_unlock(&host_cmd_pool_mutex);
+ shost->cmd_pool = NULL;
+ scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
}
#ifdef CONFIG_SCSI_LOGGING
goto out;
}
- /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
- if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
+ /* Check to see if the scsi lld made this device blocked. */
+ if (unlikely(scsi_device_blocked(cmd->device))) {
/*
- * in SDEV_BLOCK, the command is just put back on the device
- * queue. The suspend state has already blocked the queue so
- * future requests should not occur until the device
- * transitions out of the suspend state.
+ * in blocked state, the command is just put back on
+ * the device queue. The suspend state has already
+ * blocked the queue so future requests should not
+ * occur until the device transitions out of the
+ * suspend state.
*/
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
*/
- if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
+ if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
SCSI_LOG_MLQUEUE(3,
- printk("queuecommand : command too long.\n"));
+ printk("queuecommand : command too long. "
+ "cdb_size=%d host->max_cmd_len=%d\n",
+ cmd->cmd_len, cmd->device->host->max_cmd_len));
cmd->result = (DID_ABORT << 16);
scsi_done(cmd);
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
- good_bytes = cmd->request_bufflen;
+ good_bytes = scsi_bufflen(cmd);
if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
good_bytes = drv->done(cmd);
+ /*
+ * USB may not give sense identifying a bad sector and
+ * simply return a residue instead, so subtract off the
+ * residue if drv->done() error processing indicates no
+ * change to the completion length.
+ */
+ if (good_bytes == old_good_bytes)
+ good_bytes -= scsi_get_resid(cmd);
}
scsi_io_completion(cmd, good_bytes);
}
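To illustrate the residue adjustment above with hypothetical numbers:
a 4096-byte READ on a USB device that transfers only 3584 bytes may
return no sense data but report a 512-byte residue, in which case:

	good_bytes = scsi_bufflen(cmd);			/* 4096 */
	if (good_bytes == old_good_bytes)		/* drv->done() found no error */
		good_bytes -= scsi_get_resid(cmd);	/* 4096 - 512 = 3584 */

so only the bytes actually transferred are reported as completed.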
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- /* Check to see if the queue is managed by the block layer.
- * If it is, and we fail to adjust the depth, exit. */
- if (blk_queue_tagged(sdev->request_queue) &&
- blk_queue_resize_tags(sdev->request_queue, tags) != 0)
- goto out;
+ /*
+ * Check to see if the queue is managed by the block layer.
+ * If it is, and we fail to adjust the depth, exit.
+ *
+ * Do not resize the tag map if it is a host wide shared bqt,
+ * because the size should be the host's can_queue. If there
+ * is more IO than the LLD's can_queue (so there are not enough
+ * tags), request_fn's host queue ready check will handle it.
+ */
+ if (!sdev->host->bqt) {
+ if (blk_queue_tagged(sdev->request_queue) &&
+ blk_queue_resize_tags(sdev->request_queue, tags) != 0)
+ goto out;
+ }
sdev->queue_depth = tags;
switch (tagged) {
EXPORT_SYMBOL(starget_for_each_device);
/**
- * __starget_for_each_device - helper to walk all devices of a target
- * (UNLOCKED)
+ * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
* @starget: target whose devices we want to iterate over.
+ * @data: parameter for callback @fn()
+ * @fn: callback function that is invoked for each device
*
* This traverses over each device of @starget. It does _not_
* take a reference on the scsi_device, so the whole loop must be