#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include "blk.h"
/**
* blk_queue_find_tag - find a request by its tag and queue
*/
/**
* __blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
* actually freed and false if there are still references using it
*/
static int __blk_free_tags(struct blk_queue_tag *bqt)
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
- BUG_ON(bqt->busy);
+ BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ bqt->max_depth);
kfree(bqt->tag_index);
bqt->tag_index = NULL;
__blk_free_tags(bqt);
q->queue_tags = NULL;
- q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * For externally managed @bqt@ frees the map. Callers of this
+ * For an externally managed @bqt, frees the map. Callers of this
* function must guarantee to have released all the queues that
* might have been using this tag map.
*/
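/*
 * Illustrative sketch (not part of blk-tag.c): how an externally managed
 * tag map is shared and torn down, using only the helpers documented in
 * this file. The queues q0/q1 and the depth of 32 are hypothetical; per
 * the note above, every queue using the map must have been released
 * (e.g. via blk_cleanup_queue()) before blk_free_tags() is called.
 */
struct blk_queue_tag *bqt;

bqt = blk_init_tags(32);		/* allocate the external map */
if (!bqt)
	return -ENOMEM;

blk_queue_init_tags(q0, 32, bqt);	/* each queue takes a reference */
blk_queue_init_tags(q1, 32, bqt);
/* ... tagged I/O on both queues ... */
blk_cleanup_queue(q0);			/* releasing the queues drops their references */
blk_cleanup_queue(q1);
blk_free_tags(bqt);			/* drops the last reference and frees the map */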
* @q: the request queue for the device
*
* Notes:
- * This is used to disabled tagged queuing to a device, yet leave
+ * This is used to disable tagged queuing to a device, yet leave
* the queue in function.
**/
void blk_queue_free_tags(struct request_queue *q)
{
- clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
if (q && depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
printk(KERN_ERR "%s: adjusted depth to %d\n",
- __FUNCTION__, depth);
+ __func__, depth);
}
tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
if (init_tag_map(q, tags, depth))
goto fail;
- tags->busy = 0;
atomic_set(&tags->refcnt, 1);
return tags;
fail:
/**
* blk_init_tags - initialize the tag info for an external tag map
* @depth: the maximum queue depth supported
- * @tags: the tag to use
**/
struct blk_queue_tag *blk_init_tags(int depth)
{
* @q: the request queue for the device
* @depth: the maximum queue depth supported
* @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
**/
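/*
 * Illustrative sketch (not part of blk-tag.c): resizing the map of a queue
 * that already has tagging enabled. Per the note above, the queue lock must
 * be held for the resize case; new_depth is a hypothetical value chosen by
 * the caller.
 */
int ret;

spin_lock_irq(q->queue_lock);
ret = blk_queue_init_tags(q, new_depth, NULL);	/* resizes q->queue_tags */
spin_unlock_irq(q->queue_lock);
if (ret)
	printk(KERN_ERR "resize to %d failed: %d\n", new_depth, ret);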
int blk_queue_init_tags(struct request_queue *q, int depth,
struct blk_queue_tag *tags)
rc = blk_queue_resize_tags(q, depth);
if (rc)
return rc;
- set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_QUEUED, q);
return 0;
} else
atomic_inc(&tags->refcnt);
* assign it, all done
*/
q->queue_tags = tags;
- q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+ queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
fail:
* @rq: the request that has completed
*
* Description:
- * Typically called when end_that_request_first() returns 0, meaning
+ * Typically called when end_that_request_first() returns %0, meaning
* all transfers have been done for a request. It's important to call
* this function before end_that_request_last(), as that will put the
* request back on the free list thus corrupting the internal tag list.
if (unlikely(bqt->tag_index[tag] == NULL))
printk(KERN_ERR "%s: tag %d is missing\n",
- __FUNCTION__, tag);
+ __func__, tag);
bqt->tag_index[tag] = NULL;
if (unlikely(!test_bit(tag, bqt->tag_map))) {
printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __FUNCTION__, tag);
+ __func__, tag);
return;
}
/*
* unlock memory barrier semantics.
*/
clear_bit_unlock(tag, bqt->tag_map);
- bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);
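/*
 * Illustrative sketch (not part of blk-tag.c): the completion ordering the
 * blk_queue_end_tag() description asks for, shown with the
 * __blk_end_request_all() helper that replaced the older
 * end_that_request_first()/end_that_request_last() pair. This is a
 * hypothetical driver completion path; 'error' comes from the hardware
 * status.
 */
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);
blk_queue_end_tag(q, rq);		/* release the tag first ...        */
__blk_end_request_all(rq, error);	/* ... then complete and free rq    */
spin_unlock_irqrestore(q->queue_lock, flags);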
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
+ unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
- __FUNCTION__, rq,
+ __func__, rq,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
/*
* Protect against shared tag maps, as we may not have exclusive
* access to the tag map.
+ *
+ * We reserve a few tags just for sync IO, since we don't want
+ * to starve sync IO on behalf of flooding async IO.
*/
+ max_depth = bqt->max_depth;
+ if (!rq_is_sync(rq) && max_depth > 1) {
+ max_depth -= 2;
+ if (!max_depth)
+ max_depth = 1;
+ if (q->in_flight[BLK_RW_ASYNC] > max_depth)
+ return 1;
+ }
+
do {
- tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
- if (tag >= bqt->max_depth)
+ tag = find_first_zero_bit(bqt->tag_map, max_depth);
+ if (tag >= max_depth)
return 1;
} while (test_and_set_bit_lock(tag, bqt->tag_map));
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
- bqt->busy++;
return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
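/*
 * Illustrative sketch (not part of blk-tag.c): a driver request_fn using
 * blk_queue_start_tag(). Since the function now calls blk_start_request()
 * itself, the driver only peeks at the queue head; a return of 1 means no
 * tag is available (or async IO is being throttled), so the driver stops
 * and retries on the next invocation. mydrv_issue() and the request_fn
 * name are hypothetical.
 */
extern void mydrv_issue(struct request *rq);	/* hypothetical hardware submit */

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;			/* out of tags, try again later */
		mydrv_issue(rq);		/* hand off to hardware, keyed by rq->tag */
	}
}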