block: make kblockd_schedule_work() take the queue as parameter
author Jens Axboe <jens.axboe@oracle.com>
Mon, 28 Jul 2008 11:08:45 +0000 (13:08 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Thu, 9 Oct 2008 06:56:09 +0000 (08:56 +0200)
Preparatory patch for checking queuing affinity.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
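The queue argument is not actually used inside kblockd_schedule_work() yet; the patch only widens the interface so that a later change can take the queue into account when deciding where to run the deferred work. As a rough illustration of the kind of affinity check this prepares for (not part of this commit), a hypothetical per-queue field such as q->affinity_cpu could be consulted to steer the work to a specific CPU via queue_work_on(). The sketch below is framed as a drop-in variant of the blk-core.c function and is an assumption, not the actual follow-up patch:

/*
 * Illustrative sketch only, not part of this commit.  It assumes a
 * hypothetical q->affinity_cpu field naming the CPU the queue prefers
 * for its deferred work; the real follow-up may look quite different.
 */
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	if (q->affinity_cpu >= 0)
		/* Run the work on the queue's preferred CPU. */
		return queue_work_on(q->affinity_cpu, kblockd_workqueue, work);

	/* No affinity configured: keep the current behaviour. */
	return queue_work(kblockd_workqueue, work);
}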
block/as-iosched.c
block/blk-core.c
block/cfq-iosched.c
include/linux/blkdev.h

diff --git a/block/as-iosched.c b/block/as-iosched.c
index cf4eb0e..80af925 100644
@@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
                        del_timer(&ad->antic_timer);
                ad->antic_status = ANTIC_FINISHED;
                /* see as_work_handler */
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(ad->q, &ad->antic_work);
        }
 }
 
@@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
                aic = ad->io_context->aic;
 
                ad->antic_status = ANTIC_FINISHED;
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(q, &ad->antic_work);
 
                if (aic->ttime_samples == 0) {
                        /* process anticipated on has exited or timed out*/
@@ -844,7 +844,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
        if (ad->changed_batch && ad->nr_dispatched == 1) {
                ad->current_batch_expires = jiffies +
                                        ad->batch_expire[ad->batch_data_dir];
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(q, &ad->antic_work);
                ad->changed_batch = 0;
 
                if (ad->batch_data_dir == REQ_SYNC)
diff --git a/block/blk-core.c b/block/blk-core.c
index 527b338..9c6f818 100644
@@ -305,7 +305,7 @@ void blk_unplug_timeout(unsigned long data)
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
 
-       kblockd_schedule_work(&q->unplug_work);
+       kblockd_schedule_work(q, &q->unplug_work);
 }
 
 void blk_unplug(struct request_queue *q)
@@ -346,7 +346,7 @@ void blk_start_queue(struct request_queue *q)
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                blk_plug_device(q);
-               kblockd_schedule_work(&q->unplug_work);
+               kblockd_schedule_work(q, &q->unplug_work);
        }
 }
 EXPORT_SYMBOL(blk_start_queue);
@@ -411,7 +411,7 @@ void __blk_run_queue(struct request_queue *q)
                        queue_flag_clear(QUEUE_FLAG_REENTER, q);
                } else {
                        blk_plug_device(q);
-                       kblockd_schedule_work(&q->unplug_work);
+                       kblockd_schedule_work(q, &q->unplug_work);
                }
        }
 }
@@ -1959,7 +1959,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
-int kblockd_schedule_work(struct work_struct *work)
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1e2aff8..5f6fd28 100644
@@ -244,7 +244,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
-               kblockd_schedule_work(&cfqd->unplug_work);
+               kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1adb038..10aa46c 100644
@@ -912,7 +912,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \