X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=block%2Fblk-tag.c;h=ece65fc4c79b511c37fb3eb89c4a746dc105ba8d;hb=b9b76dfaac6fa2c289ee8a005be637afd2da7e2f;hp=4780a46ce2346898953085a6f4079c7734dcbaf7;hpb=278caf0120a77e4398762357a8cc522d094fe2f2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46..ece65fc 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/slab.h>
 
 #include "blk.h"
@@ -29,7 +30,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  * __blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
  * actually freed and false if there are still references using it
  */
 static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -38,7 +39,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
-               BUG_ON(bqt->busy);
+               BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+                                               bqt->max_depth);
 
                kfree(bqt->tag_index);
                bqt->tag_index = NULL;
@@ -70,14 +72,14 @@ void __blk_queue_free_tags(struct request_queue *q)
        __blk_free_tags(bqt);
 
        q->queue_tags = NULL;
-       q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+       queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
  * blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * For externally managed @bqt@ frees the map. Callers of this
+ * For externally managed @bqt frees the map. Callers of this
  * function must guarantee to have released all the queues that
  * might have been using this tag map.
  */
@@ -93,12 +95,12 @@ EXPORT_SYMBOL(blk_free_tags);
  * @q: the request queue for the device
  *
  * Notes:
- *   This is used to disabled tagged queuing to a device, yet leave
+ *   This is used to disable tagged queuing to a device, yet leave
  *   queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-       clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+       queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
@@ -112,7 +114,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
-                      __FUNCTION__, depth);
+                      __func__, depth);
        }
 
        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -147,7 +149,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
        if (init_tag_map(q, tags, depth))
                goto fail;
 
-       tags->busy = 0;
        atomic_set(&tags->refcnt, 1);
        return tags;
 fail:
@@ -158,7 +159,6 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth: the maximum queue depth supported
- * @tags: the tag to use
  **/
 struct blk_queue_tag *blk_init_tags(int depth)
 {
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
  **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
                         struct blk_queue_tag *tags)
@@ -188,7 +191,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
-               set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+               queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         * assign it, all done
         */
        q->queue_tags = tags;
-       q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 fail:
@@ -268,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  * @rq: the request that has completed
  *
  * Description:
- *    Typically called when end_that_request_first() returns 0, meaning
+ *    Typically called when end_that_request_first() returns %0, meaning
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
@@ -296,13 +299,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
 
        bqt->tag_index[tag] = NULL;
 
        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
                return;
        }
        /*
@@ -310,7 +313,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
-       bqt->busy--;
 }
 EXPORT_SYMBOL(blk_queue_end_tag);
@@ -335,12 +337,13 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
+       unsigned max_depth;
        int tag;
 
        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
-                      __FUNCTION__, rq,
+                      __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }
@@ -348,10 +351,22 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
+        *
+        * We reserve a few tags just for sync IO, since we don't want
+        * to starve sync IO on behalf of flooding async IO.
         */
+       max_depth = bqt->max_depth;
+       if (!rq_is_sync(rq) && max_depth > 1) {
+               max_depth -= 2;
+               if (!max_depth)
+                       max_depth = 1;
+               if (q->in_flight[BLK_RW_ASYNC] > max_depth)
+                       return 1;
+       }
+
        do {
-               tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-               if (tag >= bqt->max_depth)
+               tag = find_first_zero_bit(bqt->tag_map, max_depth);
+               if (tag >= max_depth)
                        return 1;
 
        } while (test_and_set_bit_lock(tag, bqt->tag_map));
@@ -363,9 +378,8 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
-       blkdev_dequeue_request(rq);
+       blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
-       bqt->busy++;
        return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
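
Below is a minimal user-space sketch of the allocation policy this patch introduces in blk_queue_start_tag(): async requests see the tag depth reduced by two (clamped to one) and are additionally gated on their in-flight count, while "busy" state is tracked purely by bits in the tag map rather than a separate counter. The names (demo_start_tag, demo_end_tag, in_flight_async, MAX_DEPTH) are hypothetical illustrations, not kernel APIs, and the plain C bit operations stand in for find_first_zero_bit()/test_and_set_bit_lock()/clear_bit_unlock().

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 8
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long tag_map[(MAX_DEPTH + BITS_PER_LONG - 1) / BITS_PER_LONG];
static unsigned int in_flight_async;	/* stand-in for q->in_flight[BLK_RW_ASYNC] */

static bool test_and_set(unsigned int bit)
{
	unsigned long mask = 1UL << (bit % BITS_PER_LONG);
	unsigned long old = tag_map[bit / BITS_PER_LONG];

	tag_map[bit / BITS_PER_LONG] = old | mask;
	return old & mask;		/* true if the tag was already busy */
}

/* Returns the allocated tag, or -1 when the (possibly reduced) map is full. */
static int demo_start_tag(bool is_sync)
{
	unsigned int max_depth = MAX_DEPTH;
	unsigned int tag;

	if (!is_sync && max_depth > 1) {
		max_depth -= 2;		/* keep a couple of tags back for sync IO */
		if (!max_depth)
			max_depth = 1;
		if (in_flight_async > max_depth)
			return -1;
	}

	for (tag = 0; tag < max_depth; tag++) {
		if (!test_and_set(tag)) {
			if (!is_sync)
				in_flight_async++;
			return (int)tag;
		}
	}
	return -1;
}

/* Completion only clears the bit in the map; there is no busy counter. */
static void demo_end_tag(int tag, bool was_sync)
{
	tag_map[tag / BITS_PER_LONG] &= ~(1UL << (tag % BITS_PER_LONG));
	if (!was_sync)
		in_flight_async--;
}

int main(void)
{
	int t, first_async = -1;

	/* Async submissions stop two tags short of the full depth. */
	while ((t = demo_start_tag(false)) >= 0) {
		if (first_async < 0)
			first_async = t;
		printf("async got tag %d\n", t);
	}

	/* The reserved tags are still available to sync requests. */
	while ((t = demo_start_tag(true)) >= 0)
		printf("sync  got tag %d\n", t);

	/* Freeing an async tag makes it allocatable again. */
	demo_end_tag(first_async, false);
	printf("async tag %d recycled as %d\n", first_async,
	       demo_start_tag(false));
	return 0;
}

With MAX_DEPTH set to 8 this prints six async tags followed by the two reserved tags going to sync requests, which is the behaviour the q->in_flight[BLK_RW_ASYNC] gate and the reduced max_depth aim for in the hunk above; the real code also relies on the atomic, locking semantics of test_and_set_bit_lock()/clear_bit_unlock(), which this single-threaded sketch does not model.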