X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fscsi%2Fscsi_lib.c;h=ff5d56b3ee4d5c66a6ee4ab34609fc1800cd347b;hb=c437d657c59921b048997d5bc1f5733670112902;hp=0c86be71bb33773d504a325270deaf00b88655a2;hpb=f5c0dde4c66421a3a2d7d6fa604a712c9b0744e5;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0c86be7..ff5d56b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -34,13 +35,6 @@ #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) #define SG_MEMPOOL_SIZE 2 -/* - * The maximum number of SG segments that we will put inside a scatterlist - * (unless chaining is used). Should ideally fit inside a single page, to - * avoid a higher order allocation. - */ -#define SCSI_MAX_SG_SEGMENTS 128 - struct scsi_host_sg_pool { size_t size; char *name; @@ -48,22 +42,31 @@ struct scsi_host_sg_pool { mempool_t *pool; }; -#define SP(x) { x, "sgpool-" #x } +#define SP(x) { x, "sgpool-" __stringify(x) } +#if (SCSI_MAX_SG_SEGMENTS < 32) +#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) +#endif static struct scsi_host_sg_pool scsi_sg_pools[] = { SP(8), SP(16), -#if (SCSI_MAX_SG_SEGMENTS > 16) - SP(32), #if (SCSI_MAX_SG_SEGMENTS > 32) - SP(64), + SP(32), #if (SCSI_MAX_SG_SEGMENTS > 64) + SP(64), +#if (SCSI_MAX_SG_SEGMENTS > 128) SP(128), +#if (SCSI_MAX_SG_SEGMENTS > 256) +#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) +#endif #endif #endif #endif + SP(SCSI_MAX_SG_SEGMENTS) }; #undef SP +struct kmem_cache *scsi_sdb_cache; + static void scsi_run_queue(struct request_queue *q); /* @@ -175,7 +178,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * * returns the req->errors value which is the scsi_cmnd result * field. - **/ + */ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, int timeout, int retries, int flags) @@ -204,6 +207,15 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, */ blk_execute_rq(req->q, NULL, req, 1); + /* + * Some devices (USB mass-storage in particular) may transfer + * garbage data together with a residue indicating that the data + * is invalid. Prevent the garbage from being misinterpreted + * and prevent security leaks by zeroing out the excess data. 
+ */ + if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) + memset(buffer + (bufflen - req->data_len), 0, req->data_len); + ret = req->errors; out: blk_put_request(req); @@ -274,7 +286,7 @@ static void scsi_bi_endio(struct bio *bio, int error) /** * scsi_req_map_sg - map a scatterlist into a request * @rq: request to fill - * @sg: scatterlist + * @sgl: scatterlist * @nsegs: number of elements * @bufflen: len of buffer * @gfp: memory allocation flags @@ -295,10 +307,9 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl, int i, err, nr_vecs = 0; for_each_sg(sgl, sg, nsegs, i) { - page = sg->page; + page = sg_page(sg); off = sg->offset; len = sg->length; - data_len += len; while (len > 0 && data_len > 0) { /* @@ -365,14 +376,16 @@ free_bios: * @sdev: scsi device * @cmd: scsi command * @cmd_len: length of scsi cdb - * @data_direction: data direction + * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE * @buffer: data buffer (this can be a kernel buffer or scatterlist) * @bufflen: len of buffer * @use_sg: if buffer is a scatterlist this is the number of elements * @timeout: request timeout in seconds * @retries: number of times to retry request - * @flags: or into request flags - **/ + * @privdata: data passed to done() + * @done: callback function when done + * @gfp: memory allocation flags + */ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd, int cmd_len, int data_direction, void *buffer, unsigned bufflen, int use_sg, int timeout, int retries, void *privdata, @@ -438,10 +451,10 @@ EXPORT_SYMBOL_GPL(scsi_execute_async); static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) { cmd->serial_number = 0; - cmd->resid = 0; - memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); + scsi_set_resid(cmd, 0); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (cmd->cmd_len == 0) - cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); + cmd->cmd_len = scsi_command_size(cmd->cmnd); } void scsi_device_unbusy(struct scsi_device *sdev) @@ -524,7 +537,7 @@ static void scsi_run_queue(struct request_queue *q) struct Scsi_Host *shost = sdev->host; unsigned long flags; - if (sdev->single_lun) + if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); spin_lock_irqsave(shost->host_lock, flags); @@ -532,6 +545,9 @@ static void scsi_run_queue(struct request_queue *q) !shost->host_blocked && !shost->host_self_blocked && !((shost->can_queue > 0) && (shost->host_busy >= shost->can_queue))) { + + int flagset; + /* * As long as shost is accepting commands and we have * starved queues, call blk_run_queue. 
scsi_request_fn @@ -545,19 +561,20 @@ static void scsi_run_queue(struct request_queue *q) sdev = list_entry(shost->starved_list.next, struct scsi_device, starved_entry); list_del_init(&sdev->starved_entry); - spin_unlock_irqrestore(shost->host_lock, flags); + spin_unlock(shost->host_lock); + + spin_lock(sdev->request_queue->queue_lock); + flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && + !test_bit(QUEUE_FLAG_REENTER, + &sdev->request_queue->queue_flags); + if (flagset) + queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); + __blk_run_queue(sdev->request_queue); + if (flagset) + queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); + spin_unlock(sdev->request_queue->queue_lock); - - if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && - !test_and_set_bit(QUEUE_FLAG_REENTER, - &sdev->request_queue->queue_flags)) { - blk_run_queue(sdev->request_queue); - clear_bit(QUEUE_FLAG_REENTER, - &sdev->request_queue->queue_flags); - } else - blk_run_queue(sdev->request_queue); - - spin_lock_irqsave(shost->host_lock, flags); + spin_lock(shost->host_lock); if (unlikely(!list_empty(&sdev->starved_entry))) /* * sdev lost a race, and was put back on the @@ -632,7 +649,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost) * of upper level post-processing and scsi_io_completion). * * Arguments: cmd - command that is complete. - * uptodate - 1 if I/O indicates success, <= 0 for I/O error. + * error - 0 if I/O indicates success, < 0 for I/O error. * bytes - number of bytes of completed I/O * requeue - indicates whether we should requeue leftovers. * @@ -647,26 +664,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost) * at some point during this call. * Notes: If cmd was requeued, upon return it will be a stale pointer. */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, +static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, int bytes, int requeue) { struct request_queue *q = cmd->device->request_queue; struct request *req = cmd->request; - unsigned long flags; /* * If there are blocks left over at the end, set up the command * to queue the remainder of them. */ - if (end_that_request_chunk(req, uptodate, bytes)) { + if (blk_end_request(req, error, bytes)) { int leftover = (req->hard_nr_sectors << 9); if (blk_pc_request(req)) leftover = req->data_len; /* kill remainder if no retrys */ - if (!uptodate && blk_noretry_request(req)) - end_that_request_chunk(req, 0, leftover); + if (error && blk_noretry_request(req)) + blk_end_request(req, error, leftover); else { if (requeue) { /* @@ -681,14 +697,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, } } - add_disk_randomness(req->rq_disk); - - spin_lock_irqsave(q->queue_lock, flags); - if (blk_rq_tagged(req)) - blk_queue_end_tag(q, req); - end_that_request_last(req, uptodate); - spin_unlock_irqrestore(q->queue_lock, flags); - /* * This will goose the queue request function at the end, so we don't * need to worry about launching another command. @@ -697,173 +705,57 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, return NULL; } -/* - * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit - * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. - */ -#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048 - static inline unsigned int scsi_sgtable_index(unsigned short nents) { unsigned int index; - switch (nents) { - case 1 ... 
8: + BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); + + if (nents <= 8) index = 0; - break; - case 9 ... 16: - index = 1; - break; -#if (SCSI_MAX_SG_SEGMENTS > 16) - case 17 ... 32: - index = 2; - break; -#if (SCSI_MAX_SG_SEGMENTS > 32) - case 33 ... 64: - index = 3; - break; -#if (SCSI_MAX_SG_SEGMENTS > 64) - case 65 ... 128: - index = 4; - break; -#endif -#endif -#endif - default: - printk(KERN_ERR "scsi: bad segment count=%d\n", nents); - BUG(); - } + else + index = get_count_order(nents) - 3; return index; } -struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) +static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) { struct scsi_host_sg_pool *sgp; - struct scatterlist *sgl, *prev, *ret; - unsigned int index; - int this, left; - - BUG_ON(!cmd->use_sg); - left = cmd->use_sg; - ret = prev = NULL; - do { - this = left; - if (this > SCSI_MAX_SG_SEGMENTS) { - this = SCSI_MAX_SG_SEGMENTS - 1; - index = SG_MEMPOOL_NR - 1; - } else - index = scsi_sgtable_index(this); - - left -= this; + sgp = scsi_sg_pools + scsi_sgtable_index(nents); + mempool_free(sgl, sgp->pool); +} - sgp = scsi_sg_pools + index; +static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) +{ + struct scsi_host_sg_pool *sgp; - sgl = mempool_alloc(sgp->pool, gfp_mask); - if (unlikely(!sgl)) - goto enomem; + sgp = scsi_sg_pools + scsi_sgtable_index(nents); + return mempool_alloc(sgp->pool, gfp_mask); +} - /* - * first loop through, set initial index and return value - */ - if (!ret) - ret = sgl; +static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, + gfp_t gfp_mask) +{ + int ret; - /* - * chain previous sglist, if any. we know the previous - * sglist must be the biggest one, or we would not have - * ended up doing another loop. - */ - if (prev) - sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl); + BUG_ON(!nents); - /* - * don't allow subsequent mempool allocs to sleep, it would - * violate the mempool principle. - */ - gfp_mask &= ~__GFP_WAIT; - gfp_mask |= __GFP_HIGH; - prev = sgl; - } while (left); + ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, + gfp_mask, scsi_sg_alloc); + if (unlikely(ret)) + __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, + scsi_sg_free); - /* - * ->use_sg may get modified after dma mapping has potentially - * shrunk the number of segments, so keep a copy of it for free. - */ - cmd->__use_sg = cmd->use_sg; return ret; -enomem: - if (ret) { - /* - * Free entries chained off ret. Since we were trying to - * allocate another sglist, we know that all entries are of - * the max size. 
- */ - sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1; - prev = ret; - ret = &ret[SCSI_MAX_SG_SEGMENTS - 1]; - - while ((sgl = sg_chain_ptr(ret)) != NULL) { - ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1]; - mempool_free(sgl, sgp->pool); - } - - mempool_free(prev, sgp->pool); - } - return NULL; } -EXPORT_SYMBOL(scsi_alloc_sgtable); - -void scsi_free_sgtable(struct scsi_cmnd *cmd) +static void scsi_free_sgtable(struct scsi_data_buffer *sdb) { - struct scatterlist *sgl = cmd->request_buffer; - struct scsi_host_sg_pool *sgp; - - /* - * if this is the biggest size sglist, check if we have - * chained parts we need to free - */ - if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) { - unsigned short this, left; - struct scatterlist *next; - unsigned int index; - - left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1); - next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]); - while (left && next) { - sgl = next; - this = left; - if (this > SCSI_MAX_SG_SEGMENTS) { - this = SCSI_MAX_SG_SEGMENTS - 1; - index = SG_MEMPOOL_NR - 1; - } else - index = scsi_sgtable_index(this); - - left -= this; - - sgp = scsi_sg_pools + index; - - if (left) - next = sg_chain_ptr(&sgl[sgp->size - 1]); - - mempool_free(sgl, sgp->pool); - } - - /* - * Restore original, will be freed below - */ - sgl = cmd->request_buffer; - sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1; - } else - sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg); - - mempool_free(sgl, sgp->pool); + __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); } -EXPORT_SYMBOL(scsi_free_sgtable); - /* * Function: scsi_release_buffers() * @@ -881,17 +773,52 @@ EXPORT_SYMBOL(scsi_free_sgtable); * the scatter-gather table, and potentially any bounce * buffers. */ -static void scsi_release_buffers(struct scsi_cmnd *cmd) +void scsi_release_buffers(struct scsi_cmnd *cmd) { - if (cmd->use_sg) - scsi_free_sgtable(cmd); + if (cmd->sdb.table.nents) + scsi_free_sgtable(&cmd->sdb); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (scsi_bidi_cmnd(cmd)) { + struct scsi_data_buffer *bidi_sdb = + cmd->request->next_rq->special; + scsi_free_sgtable(bidi_sdb); + kmem_cache_free(scsi_sdb_cache, bidi_sdb); + cmd->request->next_rq->special = NULL; + } + + if (scsi_prot_sg_count(cmd)) + scsi_free_sgtable(cmd->prot_sdb); +} +EXPORT_SYMBOL(scsi_release_buffers); + +/* + * Bidi commands Must be complete as a whole, both sides at once. + * If part of the bytes were written and lld returned + * scsi_in()->resid and/or scsi_out()->resid this information will be left + * in req->data_len and req->next_rq->data_len. The upper-layer driver can + * decide what to do with this information. + */ +static void scsi_end_bidi_request(struct scsi_cmnd *cmd) +{ + struct request *req = cmd->request; + unsigned int dlen = req->data_len; + unsigned int next_dlen = req->next_rq->data_len; + + req->data_len = scsi_out(cmd)->resid; + req->next_rq->data_len = scsi_in(cmd)->resid; + + /* The req and req->next_rq have not been completed */ + BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); + + scsi_release_buffers(cmd); /* - * Zero these out. They now point to freed memory, and it is - * dangerous to hang onto the pointers. + * This will goose the queue request function at the end, so we don't + * need to worry about launching another command. 
*/ - cmd->request_buffer = NULL; - cmd->request_bufflen = 0; + scsi_next_command(cmd); } /* @@ -925,16 +852,14 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd) void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; - int this_count = cmd->request_bufflen; + int this_count = scsi_bufflen(cmd); struct request_queue *q = cmd->device->request_queue; struct request *req = cmd->request; - int clear_errors = 1; + int error = 0; struct scsi_sense_hdr sshdr; int sense_valid = 0; int sense_deferred = 0; - scsi_release_buffers(cmd); - if (result) { sense_valid = scsi_command_normalize_sense(cmd, &sshdr); if (sense_valid) @@ -944,7 +869,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ req->errors = result; if (result) { - clear_errors = 0; if (sense_valid && req->sense) { /* * SG_IO wants current and deferred errors @@ -956,10 +880,20 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) memcpy(req->sense, cmd->sense_buffer, len); req->sense_len = len; } + if (!sense_deferred) + error = -EIO; } - req->data_len = cmd->resid; + if (scsi_bidi_cmnd(cmd)) { + /* will also release_buffers */ + scsi_end_bidi_request(cmd); + return; + } + req->data_len = scsi_get_resid(cmd); } + BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ + scsi_release_buffers(cmd); + /* * Next deal with any sectors which we were able to correctly * handle. @@ -967,16 +901,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " "%d bytes done.\n", req->nr_sectors, good_bytes)); - SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); - - if (clear_errors) - req->errors = 0; /* A number of bytes were successfully read. If there * are leftovers and there is some kind of error * (result != 0), retry the rest. */ - if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) + if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) return; /* good_bytes = 0, or (inclusive) there were leftovers and @@ -990,7 +920,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * and quietly refuse further access. */ cmd->device->changed = 1; - scsi_end_request(cmd, 0, this_count, 1); + scsi_end_request(cmd, -EIO, this_count, 1); return; } else { /* Must have been a power glitch, or a @@ -1020,9 +950,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * 6-byte command. */ scsi_requeue_command(q, cmd); - return; - } else { - scsi_end_request(cmd, 0, this_count, 1); + } else if (sshdr.asc == 0x10) /* DIX */ + scsi_end_request(cmd, -EIO, this_count, 0); + else + scsi_end_request(cmd, -EIO, this_count, 1); + return; + case ABORTED_COMMAND: + if (sshdr.asc == 0x10) { /* DIF */ + scsi_end_request(cmd, -EIO, this_count, 0); return; } break; @@ -1050,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) "Device not ready", &sshdr); - scsi_end_request(cmd, 0, this_count, 1); + scsi_end_request(cmd, -EIO, this_count, 1); return; case VOLUME_OVERFLOW: if (!(req->cmd_flags & REQ_QUIET)) { @@ -1060,7 +995,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); } /* See SSC3rXX or current. 
*/ - scsi_end_request(cmd, 0, this_count, 1); + scsi_end_request(cmd, -EIO, this_count, 1); return; default: break; @@ -1081,7 +1016,36 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); } } - scsi_end_request(cmd, 0, this_count, !result); + scsi_end_request(cmd, -EIO, this_count, !result); +} + +static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, + gfp_t gfp_mask) +{ + int count; + + /* + * If sg table allocation fails, requeue request later. + */ + if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, + gfp_mask))) { + return BLKPREP_DEFER; + } + + req->buffer = NULL; + + /* + * Next, walk the list, and fill in the addresses and sizes of + * each segment. + */ + count = blk_rq_map_sg(req->q, req, sdb->table.sgl); + BUG_ON(count > sdb->table.nents); + sdb->table.nents = count; + if (blk_pc_request(req)) + sdb->length = req->data_len; + else + sdb->length = req->nr_sectors << 9; + return BLKPREP_OK; } /* @@ -1095,50 +1059,59 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * BLKPREP_DEFER if the failure is retryable * BLKPREP_KILL if the failure is fatal */ -static int scsi_init_io(struct scsi_cmnd *cmd) +int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) { - struct request *req = cmd->request; - int count; - - /* - * We used to not use scatter-gather for single segment request, - * but now we do (it makes highmem I/O easier to support without - * kmapping pages) - */ - cmd->use_sg = req->nr_phys_segments; + int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask); + if (error) + goto err_exit; + + if (blk_bidi_rq(cmd->request)) { + struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( + scsi_sdb_cache, GFP_ATOMIC); + if (!bidi_sdb) { + error = BLKPREP_DEFER; + goto err_exit; + } - /* - * If sg table allocation fails, requeue request later. - */ - cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC); - if (unlikely(!cmd->request_buffer)) { - scsi_unprep_request(req); - return BLKPREP_DEFER; + cmd->request->next_rq->special = bidi_sdb; + error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb, + GFP_ATOMIC); + if (error) + goto err_exit; } - req->buffer = NULL; - if (blk_pc_request(req)) - cmd->request_bufflen = req->data_len; - else - cmd->request_bufflen = req->nr_sectors << 9; + if (blk_integrity_rq(cmd->request)) { + struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; + int ivecs, count; - /* - * Next, walk the list, and fill in the addresses and sizes of - * each segment. 
- */ - count = blk_rq_map_sg(req->q, req, cmd->request_buffer); - if (likely(count <= cmd->use_sg)) { - cmd->use_sg = count; - return BLKPREP_OK; + BUG_ON(prot_sdb == NULL); + ivecs = blk_rq_count_integrity_sg(cmd->request); + + if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { + error = BLKPREP_DEFER; + goto err_exit; + } + + count = blk_rq_map_integrity_sg(cmd->request, + prot_sdb->table.sgl); + BUG_ON(unlikely(count > ivecs)); + + cmd->prot_sdb = prot_sdb; + cmd->prot_sdb->table.nents = count; } - printk(KERN_ERR "Incorrect number of segments after building list\n"); - printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg); - printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, - req->current_nr_sectors); + return BLKPREP_OK ; + +err_exit: + scsi_release_buffers(cmd); + if (error == BLKPREP_KILL) + scsi_put_command(cmd); + else /* BLKPREP_DEFER */ + scsi_unprep_request(cmd->request); - return BLKPREP_KILL; + return error; } +EXPORT_SYMBOL(scsi_init_io); static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, struct request *req) @@ -1158,6 +1131,8 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, cmd->tag = req->tag; cmd->request = req; + cmd->cmnd = req->cmd; + return cmd; } @@ -1184,21 +1159,17 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(!req->nr_phys_segments); - ret = scsi_init_io(cmd); + ret = scsi_init_io(cmd, GFP_ATOMIC); if (unlikely(ret)) return ret; } else { BUG_ON(req->data_len); BUG_ON(req->data); - cmd->request_bufflen = 0; - cmd->request_buffer = NULL; - cmd->use_sg = 0; + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); req->buffer = NULL; } - BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd)); - memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); cmd->cmd_len = req->cmd_len; if (!req->data_len) cmd->sc_data_direction = DMA_NONE; @@ -1226,6 +1197,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) if (ret != BLKPREP_OK) return ret; + + if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh + && sdev->scsi_dh_data->scsi_dh->prep_fn)) { + ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); + if (ret != BLKPREP_OK) + return ret; + } + /* * Filesystem requests must transfer data. 
*/ @@ -1235,7 +1214,8 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) if (unlikely(!cmd)) return BLKPREP_DEFER; - return scsi_init_io(cmd); + memset(cmd->cmnd, 0, BLK_MAX_CDB); + return scsi_init_io(cmd, GFP_ATOMIC); } EXPORT_SYMBOL(scsi_setup_fs_cmnd); @@ -1323,7 +1303,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) } EXPORT_SYMBOL(scsi_prep_return); -static int scsi_prep_fn(struct request_queue *q, struct request *req) +int scsi_prep_fn(struct request_queue *q, struct request *req) { struct scsi_device *sdev = q->queuedata; int ret = BLKPREP_KILL; @@ -1385,7 +1365,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, printk("scsi%d unblocking host at zero depth\n", shost->host_no)); } else { - blk_plug_device(q); return 0; } } @@ -1416,7 +1395,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) if (unlikely(cmd == NULL)) { printk(KERN_CRIT "impossible request in %s.\n", - __FUNCTION__); + __func__); BUG(); } @@ -1540,15 +1519,30 @@ static void scsi_request_fn(struct request_queue *q) printk(KERN_CRIT "impossible request in %s.\n" "please mail a stack trace to " "linux-scsi@vger.kernel.org\n", - __FUNCTION__); + __func__); blk_dump_rq_flags(req, "foo"); BUG(); } spin_lock(shost->host_lock); + /* + * We hit this when the driver is using a host wide + * tag map. For device level tag maps the queue_depth check + * in the device ready fn would prevent us from trying + * to allocate a tag. Since the map is a shared host resource + * we add the dev to the starved list so it eventually gets + * a run when a tag is freed. + */ + if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { + if (list_empty(&sdev->starved_entry)) + list_add_tail(&sdev->starved_entry, + &shost->starved_list); + goto not_ready; + } + if (!scsi_host_queue_ready(q, shost, sdev)) goto not_ready; - if (sdev->single_lun) { + if (scsi_target(sdev)->single_lun) { if (scsi_target(sdev)->starget_sdev_user && scsi_target(sdev)->starget_sdev_user != sdev) goto not_ready; @@ -1636,6 +1630,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, request_fn_proc *request_fn) { struct request_queue *q; + struct device *dev = shost->shost_gendev.parent; q = blk_init_queue(request_fn, NULL); if (!q) @@ -1645,27 +1640,26 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, * this limit is imposed by hardware restrictions */ blk_queue_max_hw_segments(q, shost->sg_tablesize); - - /* - * In the future, sg chaining support will be mandatory and this - * ifdef can then go away. Right now we don't have all archs - * converted, so better keep it safe. 
- */ -#ifdef ARCH_HAS_SG_CHAIN - if (shost->use_sg_chaining) - blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); - else - blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); -#else - blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); -#endif + blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); blk_queue_max_sectors(q, shost->max_sectors); blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); blk_queue_segment_boundary(q, shost->dma_boundary); + dma_set_seg_boundary(dev, shost->dma_boundary); + blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); + + /* New queue, no concurrency on queue_flags */ if (!shost->use_clustering) - clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); + + /* + * set a reasonable default alignment on word boundaries: the + * host and device may alter it using + * blk_queue_update_dma_alignment() later. + */ + blk_queue_dma_alignment(q, 0x03); + return q; } EXPORT_SYMBOL(__scsi_alloc_queue); @@ -1749,6 +1743,14 @@ int __init scsi_init_queue(void) return -ENOMEM; } + scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", + sizeof(struct scsi_data_buffer), + 0, 0, NULL); + if (!scsi_sdb_cache) { + printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); + goto cleanup_io_context; + } + for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; int size = sgp->size * sizeof(struct scatterlist); @@ -1758,6 +1760,7 @@ int __init scsi_init_queue(void) if (!sgp->slab) { printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name); + goto cleanup_sdb; } sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, @@ -1765,10 +1768,25 @@ int __init scsi_init_queue(void) if (!sgp->pool) { printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name); + goto cleanup_sdb; } } return 0; + +cleanup_sdb: + for (i = 0; i < SG_MEMPOOL_NR; i++) { + struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; + if (sgp->pool) + mempool_destroy(sgp->pool); + if (sgp->slab) + kmem_cache_destroy(sgp->slab); + } + kmem_cache_destroy(scsi_sdb_cache); +cleanup_io_context: + kmem_cache_destroy(scsi_io_context_cache); + + return -ENOMEM; } void scsi_exit_queue(void) @@ -1776,6 +1794,7 @@ void scsi_exit_queue(void) int i; kmem_cache_destroy(scsi_io_context_cache); + kmem_cache_destroy(scsi_sdb_cache); for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; @@ -1795,7 +1814,7 @@ void scsi_exit_queue(void) * @timeout: command timeout * @retries: number of retries before failing * @data: returns a structure abstracting the mode header data - * @sense: place to put sense data (or NULL if no sense to be collected). + * @sshdr: place to put sense data (or NULL if no sense to be collected). * must be SCSI_SENSE_BUFFERSIZE big. * * Returns zero if successful; negative error number or scsi @@ -1862,8 +1881,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, EXPORT_SYMBOL_GPL(scsi_mode_select); /** - * scsi_mode_sense - issue a mode sense, falling back from 10 to - * six bytes if necessary. + * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 
* @sdev: SCSI device to be queried * @dbd: set if mode sense will allow block descriptors to be returned * @modepage: mode page being requested @@ -1872,13 +1890,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * @timeout: command timeout * @retries: number of retries before failing * @data: returns a structure abstracting the mode header data - * @sense: place to put sense data (or NULL if no sense to be collected). + * @sshdr: place to put sense data (or NULL if no sense to be collected). * must be SCSI_SENSE_BUFFERSIZE big. * * Returns zero if unsuccessful, or the header offset (either 4 * or 8 depending on whether a six or ten byte command was * issued) if successful. - **/ + */ int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, unsigned char *buffer, int len, int timeout, int retries, @@ -1972,40 +1990,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, } EXPORT_SYMBOL(scsi_mode_sense); +/** + * scsi_test_unit_ready - test if unit is ready + * @sdev: scsi device to change the state of. + * @timeout: command timeout + * @retries: number of retries before failing + * @sshdr_external: Optional pointer to struct scsi_sense_hdr for + * returning sense. Make sure that this is cleared before passing + * in. + * + * Returns zero if unsuccessful or an error if TUR failed. For + * removable media, a return of NOT_READY or UNIT_ATTENTION is + * translated to success, with the ->changed flag updated. + **/ int -scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) +scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, + struct scsi_sense_hdr *sshdr_external) { char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0, }; - struct scsi_sense_hdr sshdr; + struct scsi_sense_hdr *sshdr; int result; - - result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, - timeout, retries); + + if (!sshdr_external) + sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); + else + sshdr = sshdr_external; + + /* try to eat the UNIT_ATTENTION if there are enough retries */ + do { + result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, + timeout, retries); + } while ((driver_byte(result) & DRIVER_SENSE) && + sshdr && sshdr->sense_key == UNIT_ATTENTION && + --retries); + + if (!sshdr) + /* could not allocate sense buffer, so can't process it */ + return result; if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { - if ((scsi_sense_valid(&sshdr)) && - ((sshdr.sense_key == UNIT_ATTENTION) || - (sshdr.sense_key == NOT_READY))) { + if ((scsi_sense_valid(sshdr)) && + ((sshdr->sense_key == UNIT_ATTENTION) || + (sshdr->sense_key == NOT_READY))) { sdev->changed = 1; result = 0; } } + if (!sshdr_external) + kfree(sshdr); return result; } EXPORT_SYMBOL(scsi_test_unit_ready); /** - * scsi_device_set_state - Take the given device through the device - * state model. + * scsi_device_set_state - Take the given device through the device state model. * @sdev: scsi device to change the state of. * @state: state to change to. * * Returns zero if unsuccessful or an error if the requested * transition is illegal. - **/ + */ int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) { @@ -2106,6 +2153,147 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) EXPORT_SYMBOL(scsi_device_set_state); /** + * sdev_evt_emit - emit a single SCSI device uevent + * @sdev: associated SCSI device + * @evt: event to emit + * + * Send a single uevent (scsi_event) to the associated scsi_device. 
+ */ +static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) +{ + int idx = 0; + char *envp[3]; + + switch (evt->evt_type) { + case SDEV_EVT_MEDIA_CHANGE: + envp[idx++] = "SDEV_MEDIA_CHANGE=1"; + break; + + default: + /* do nothing */ + break; + } + + envp[idx++] = NULL; + + kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); +} + +/** + * sdev_evt_thread - send a uevent for each scsi event + * @work: work struct for scsi_device + * + * Dispatch queued events to their associated scsi_device kobjects + * as uevents. + */ +void scsi_evt_thread(struct work_struct *work) +{ + struct scsi_device *sdev; + LIST_HEAD(event_list); + + sdev = container_of(work, struct scsi_device, event_work); + + while (1) { + struct scsi_event *evt; + struct list_head *this, *tmp; + unsigned long flags; + + spin_lock_irqsave(&sdev->list_lock, flags); + list_splice_init(&sdev->event_list, &event_list); + spin_unlock_irqrestore(&sdev->list_lock, flags); + + if (list_empty(&event_list)) + break; + + list_for_each_safe(this, tmp, &event_list) { + evt = list_entry(this, struct scsi_event, node); + list_del(&evt->node); + scsi_evt_emit(sdev, evt); + kfree(evt); + } + } +} + +/** + * sdev_evt_send - send asserted event to uevent thread + * @sdev: scsi_device event occurred on + * @evt: event to send + * + * Assert scsi device event asynchronously. + */ +void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) +{ + unsigned long flags; + +#if 0 + /* FIXME: currently this check eliminates all media change events + * for polled devices. Need to update to discriminate between AN + * and polled events */ + if (!test_bit(evt->evt_type, sdev->supported_events)) { + kfree(evt); + return; + } +#endif + + spin_lock_irqsave(&sdev->list_lock, flags); + list_add_tail(&evt->node, &sdev->event_list); + schedule_work(&sdev->event_work); + spin_unlock_irqrestore(&sdev->list_lock, flags); +} +EXPORT_SYMBOL_GPL(sdev_evt_send); + +/** + * sdev_evt_alloc - allocate a new scsi event + * @evt_type: type of event to allocate + * @gfpflags: GFP flags for allocation + * + * Allocates and returns a new scsi_event. + */ +struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, + gfp_t gfpflags) +{ + struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); + if (!evt) + return NULL; + + evt->evt_type = evt_type; + INIT_LIST_HEAD(&evt->node); + + /* evt_type-specific initialization, if any */ + switch (evt_type) { + case SDEV_EVT_MEDIA_CHANGE: + default: + /* do nothing */ + break; + } + + return evt; +} +EXPORT_SYMBOL_GPL(sdev_evt_alloc); + +/** + * sdev_evt_send_simple - send asserted event to uevent thread + * @sdev: scsi_device event occurred on + * @evt_type: type of event to send + * @gfpflags: GFP flags for allocation + * + * Assert scsi device event asynchronously, given an event type. + */ +void sdev_evt_send_simple(struct scsi_device *sdev, + enum scsi_device_event evt_type, gfp_t gfpflags) +{ + struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); + if (!evt) { + sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", + evt_type); + return; + } + + sdev_evt_send(sdev, evt); +} +EXPORT_SYMBOL_GPL(sdev_evt_send_simple); + +/** * scsi_device_quiesce - Block user issued commands. * @sdev: scsi device to quiesce. * @@ -2119,7 +2307,7 @@ EXPORT_SYMBOL(scsi_device_set_state); * Must be called with user context, may sleep. * * Returns zero if unsuccessful or an error if not. 
- **/ + */ int scsi_device_quiesce(struct scsi_device *sdev) { @@ -2144,7 +2332,7 @@ EXPORT_SYMBOL(scsi_device_quiesce); * queues. * * Must be called with user context, may sleep. - **/ + */ void scsi_device_resume(struct scsi_device *sdev) { @@ -2181,8 +2369,7 @@ scsi_target_resume(struct scsi_target *starget) EXPORT_SYMBOL(scsi_target_resume); /** - * scsi_internal_device_block - internal function to put a device - * temporarily into the SDEV_BLOCK state + * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state * @sdev: device to block * * Block request made by scsi lld's to temporarily stop all @@ -2197,7 +2384,7 @@ EXPORT_SYMBOL(scsi_target_resume); * state, all commands are deferred until the scsi lld reenables * the device with scsi_device_unblock or device_block_tmo fires. * This routine assumes the host_lock is held on entry. - **/ + */ int scsi_internal_device_block(struct scsi_device *sdev) { @@ -2237,7 +2424,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block); * (which must be a legal transition) allowing the midlayer to * goose the queue for this device. This routine assumes the * host_lock is held upon entry. - **/ + */ int scsi_internal_device_unblock(struct scsi_device *sdev) { @@ -2315,7 +2502,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock); /** * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt - * @sg: scatter-gather list + * @sgl: scatter-gather list * @sg_count: number of segments in sg * @offset: offset in bytes into sg, on return offset into the mapped area * @len: bytes to map, on return number of bytes mapped @@ -2342,7 +2529,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, if (unlikely(i == sg_count)) { printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " "elements %d\n", - __FUNCTION__, sg_len, *offset, sg_count); + __func__, sg_len, *offset, sg_count); WARN_ON(1); return NULL; } @@ -2351,7 +2538,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, *offset = *offset - len_complete + sg->offset; /* Assumption: contiguous pages can be accessed as "page + i" */ - page = nth_page(sg->page, (*offset >> PAGE_SHIFT)); + page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); *offset &= ~PAGE_MASK; /* Bytes in this sg-entry from *offset to the end of the page */ @@ -2364,8 +2551,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, EXPORT_SYMBOL(scsi_kmap_atomic_sg); /** - * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously - * mapped with scsi_kmap_atomic_sg + * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg * @virt: virtual address to be unmapped */ void scsi_kunmap_atomic_sg(void *virt)
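
A few of the changes above are easier to see outside the patch. First, the SP() macro: it switches from plain #x to __stringify(x) because the pool table gains an SP(SCSI_MAX_SG_SEGMENTS) entry, and #x would stringify the macro *name* rather than its value. A userspace sketch of the two-level expansion trick (the kernel's version, in linux/stringify.h, is defined the same way):

#include <stdio.h>

/* stand-in for the kernel's __stringify(): two expansion levels so the
 * argument is macro-expanded before '#' stringifies it */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define SCSI_MAX_SG_SEGMENTS	128	/* illustrative value */
#define SP(x) { x, "sgpool-" __stringify(x) }

struct pool { int size; const char *name; };

int main(void)
{
	struct pool good = SP(SCSI_MAX_SG_SEGMENTS);
	/* what the old '#x' version would have produced: */
	struct pool bad = { SCSI_MAX_SG_SEGMENTS,
			    "sgpool-" "SCSI_MAX_SG_SEGMENTS" };

	printf("%s vs. %s\n", good.name, bad.name);
	/* prints: sgpool-128 vs. sgpool-SCSI_MAX_SG_SEGMENTS */
	return 0;
}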
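
The memset() added to scsi_execute() closes an information leak: a device (USB mass storage in particular) may complete a transfer short, leaving a residue of bytes at the end of the buffer that were never written and still hold stale kernel memory. A minimal runnable model of the same guard and zeroing arithmetic:

#include <stdio.h>
#include <string.h>

/* same guard and memset as the patch: zero only a plausible residue */
static void zero_residue(unsigned char *buffer, size_t bufflen, size_t resid)
{
	if (resid > 0 && resid <= bufflen)
		memset(buffer + (bufflen - resid), 0, resid);
}

int main(void)
{
	unsigned char buf[8];
	size_t i;

	memset(buf, 0xaa, sizeof(buf));		/* stale "kernel" memory */
	/* pretend the device transferred only 5 of the 8 bytes */
	zero_residue(buf, sizeof(buf), 3);

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* aa aa aa aa aa 00 00 00 */
	printf("\n");
	return 0;
}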
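
scsi_end_request() can drop its add_disk_randomness() / blk_queue_end_tag() / end_that_request_last() tail because blk_end_request() now does that bookkeeping internally, returning nonzero only while part of the request is still pending. A deliberately simplified toy of that contract, including the "kill the remainder if no retries" second call the patch makes; the real function does much more (bidi, tags, entropy):

#include <stdio.h>
#include <stdbool.h>

struct toy_req { unsigned int total, done; };

/* complete 'bytes' of the request; true means leftovers remain and the
 * caller must decide their fate: requeue them, or fail them with a
 * second call, which is the shape of the rewritten scsi_end_request() */
static bool toy_blk_end_request(struct toy_req *req, int error,
				unsigned int bytes)
{
	(void)error;		/* the real code fails the bytes on error */
	req->done += bytes;
	return req->done < req->total;
}

int main(void)
{
	struct toy_req req = { .total = 4096, .done = 0 };

	/* 1024 bytes succeeded; the request may not retry, so kill the
	 * remainder with an error, as the patch does for noretry requests */
	if (toy_blk_end_request(&req, 0, 1024))
		toy_blk_end_request(&req, -5 /* -EIO */, req.total - req.done);

	printf("completed %u of %u bytes\n", req.done, req.total);
	return 0;
}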
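
The rewritten scsi_sgtable_index() replaces the case ladder with arithmetic. Assuming SCSI_MAX_SG_SEGMENTS is 128, as the comment removed above said, the pools hold 8, 16, 32, 64 and 128 entries, so for nents > 8 the index is ceil(log2(nents)) - 3; get_count_order() supplies the ceil(log2()) part. A userspace check with a stand-in for get_count_order():

#include <stdio.h>
#include <stddef.h>

/* ceil(log2(n)): userspace stand-in for the kernel's get_count_order() */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

/* mirrors the rewritten scsi_sgtable_index() */
static int sgtable_index(unsigned int nents)
{
	return nents <= 8 ? 0 : count_order(nents) - 3;
}

int main(void)
{
	const unsigned int tests[] = { 1, 8, 9, 16, 17, 32, 33, 64, 65, 128 };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("nents=%3u -> sgpool index %d\n",
		       tests[i], sgtable_index(tests[i]));
	/* indexes 0,0,1,1,2,2,3,3,4,4 -> pools of 8,16,32,64,128 entries */
	return 0;
}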
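
__scsi_alloc_queue() now calls blk_queue_dma_alignment(q, 0x03). The argument is a mask, so 0x03 means "4-byte aligned", and as far as I recall the block layer reacts to a user buffer whose address or length trips the mask by bouncing through a copy rather than failing the I/O. A toy version of that alignment test (the bounce policy itself lives in the block layer, not in this patch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 0x03 catches any address or length that is not a multiple of 4 */
static int needs_bounce(uintptr_t addr, size_t len, unsigned int mask)
{
	return (addr & mask) || (len & mask);
}

int main(void)
{
	printf("%d\n", needs_bounce(0x1000, 512, 0x03));	/* 0: direct */
	printf("%d\n", needs_bounce(0x1002, 512, 0x03));	/* 1: bounced */
	printf("%d\n", needs_bounce(0x1000, 510, 0x03));	/* 1: bounced */
	return 0;
}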
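
The extended scsi_test_unit_ready() takes an optional external sense header and now swallows UNIT ATTENTION by retrying internally. A hypothetical caller, sketched against the in-kernel API (not compilable on its own; the 30 * HZ timeout and 3 retries are arbitrary choices, not values from this patch):

/* hypothetical helper: is there usable media behind this device? */
static int toy_media_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	memset(&sshdr, 0, sizeof(sshdr));  /* must be cleared, per the kerneldoc */
	if (!scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr))
		return 1;			/* device is ready */

	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
		return 0;		/* no medium, or still spinning up */

	return 0;			/* other failures: not usable now */
}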
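
Using the new event API from a driver is a one-liner and is safe in atomic context with GFP_ATOMIC, since delivery happens later from scsi_evt_thread() via the work queue. A hypothetical LLD fragment:

/* hypothetical LLD fragment: forward a hardware "media changed"
 * notification to user space; udev then sees a KOBJ_CHANGE uevent
 * carrying SDEV_MEDIA_CHANGE=1 */
static void toy_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}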
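
scsi_kmap_atomic_sg(), whose kerneldoc is fixed up above, locates the scatterlist element containing byte *offset by accumulating element lengths and then rebasing *offset into that element. A userspace toy of the walk (the kernel version additionally adds sg->offset and kmaps the containing page):

#include <stdio.h>
#include <stddef.h>

struct toy_sg { size_t length; };

/* walk until the running byte count passes *offset, then make *offset
 * relative to the start of the element that contains it */
static int toy_find_sg(const struct toy_sg *sg, int count, size_t *offset)
{
	size_t sg_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		sg_len += sg[i].length;
		if (sg_len > *offset) {
			*offset -= sg_len - sg[i].length;
			return i;
		}
	}
	return -1;		/* past the end; the kernel WARNs here */
}

int main(void)
{
	struct toy_sg sg[] = { { 4096 }, { 8192 }, { 4096 } };
	size_t offset = 9000;
	int i = toy_find_sg(sg, 3, &offset);

	printf("element %d, offset within it %zu\n", i, offset);
	/* prints: element 1, offset within it 4904 */
	return 0;
}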