git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ext4: Enable extent format for symlinks.
[safe/jmp/linux-2.6]
/
block
/
blk-core.c
diff --git
a/block/blk-core.c
b/block/blk-core.c
index
2358fc5
..
5d09f8c
100644
(file)
--- a/
block/blk-core.c
+++ b/
block/blk-core.c
@@
-38,7
+38,7
@@
static int __make_request(struct request_queue *q, struct bio *bio);
/*
* For the allocated request tables
*/
/*
* For the allocated request tables
*/
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;
/*
* For queue allocation
/*
* For queue allocation
@@
-107,40
+107,21
@@
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
{
{
+ memset(rq, 0, sizeof(*rq));
+
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
- rq->nr_sectors = rq->hard_nr_sectors = 0;
- rq->current_nr_sectors = rq->hard_cur_sectors = 0;
- rq->bio = rq->biotail = NULL;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
- rq->rq_disk = NULL;
- rq->nr_phys_segments = 0;
- rq->nr_hw_segments = 0;
- rq->ioprio = 0;
- rq->special = NULL;
- rq->buffer = NULL;
+ rq->cmd = rq->__cmd;
rq->tag = -1;
rq->tag = -1;
- rq->errors = 0;
rq->ref_count = 1;
rq->ref_count = 1;
- rq->cmd_len = 0;
- memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->data_len = 0;
- rq->sense_len = 0;
- rq->data = NULL;
- rq->sense = NULL;
- rq->end_io = NULL;
- rq->end_io_data = NULL;
- rq->next_rq = NULL;
}
}
+EXPORT_SYMBOL(blk_rq_init);
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
@@
-193,7
+174,7
@@
void blk_dump_rq_flags(struct request *rq, char *msg)
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
-	for (bit = 0; bit < sizeof(rq->cmd); bit++)
+	for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
@@
-219,7
+200,8
@@
void blk_plug_device(struct request_queue *q)
if (blk_queue_stopped(q))
return;
if (blk_queue_stopped(q))
return;
- if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+ __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
@@
-234,9
+216,10
@@
int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
{
WARN_ON(!irqs_disabled());
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
return 0;
return 0;
+ queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
del_timer(&q->unplug_timer);
return 1;
}
del_timer(&q->unplug_timer);
return 1;
}
@@
-332,15
+315,16
@@
void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
{
WARN_ON(!irqs_disabled());
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
q->request_fn(q);
-	clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
@@
-365,7
+349,7
@@
EXPORT_SYMBOL(blk_start_queue);
void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
}
EXPORT_SYMBOL(blk_stop_queue);
@@
-394,11
+378,8
@@
EXPORT_SYMBOL(blk_sync_queue);
* blk_run_queue - run a single device queue
* @q: The queue to run
*/
* blk_run_queue - run a single device queue
* @q: The queue to run
*/
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
{
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
blk_remove_plug(q);
/*
blk_remove_plug(q);
/*
@@
-406,15
+387,28
@@
void blk_run_queue(struct request_queue *q)
* handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
* handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ queue_flag_set(QUEUE_FLAG_REENTER, q);
q->request_fn(q);
q->request_fn(q);
-	clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
}
}
} else {
blk_plug_device(q);
kblockd_schedule_work(&q->unplug_work);
}
}
+}
+EXPORT_SYMBOL(__blk_run_queue);
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@
-423,12
+417,11
@@
void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
{
kobject_put(&q->kobj);
}
-EXPORT_SYMBOL(blk_put_queue);
void blk_cleanup_queue(struct request_queue *q)
{
mutex_lock(&q->sysfs_lock);
void blk_cleanup_queue(struct request_queue *q)
{
mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
if (q->elevator)
mutex_unlock(&q->sysfs_lock);
if (q->elevator)
@@
-591,7
+584,6
@@
int blk_get_queue(struct request_queue *q)
return 1;
}
return 1;
}
-EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
@@
-608,6
+600,8
@@
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
if (!rq)
return NULL;
if (!rq)
return NULL;
+ blk_rq_init(q, rq);
+
/*
* first three bits are identical in rq->cmd_flags and bio->bi_rw,
* see bio.h and blkdev.h
/*
* first three bits are identical in rq->cmd_flags and bio->bi_rw,
* see bio.h and blkdev.h
@@
-790,8
+784,6
@@
rq_starved:
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
- rq_init(q, rq);
-
blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
return rq;
blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
return rq;
@@
-1219,10
+1211,6
@@
static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- const int rw = bio_data_dir(bio);
-
- p->sectors[rw] += bio_sectors(bio);
- p->ios[rw]++;
bio->bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
bio->bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
@@
-1771,6
+1759,7
@@
static inline void __end_request(struct request *rq, int uptodate,
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
**/
unsigned int blk_rq_bytes(struct request *rq)
{
**/
unsigned int blk_rq_bytes(struct request *rq)
{
@@
-1783,6
+1772,7
@@
EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{