diff --git a/block/as-iosched.c b/block/as-iosched.c
index 612d640..71f0abb 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -151,6 +151,7 @@ enum arq_state {
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
 static void as_antic_stop(struct as_data *ad);
@@ -164,8 +165,19 @@ static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
 	elv_ioc_count_dec(ioc_count);
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
+	if (ioc_gone) {
+		/*
+		 * AS scheduler is exiting, grab exit lock and check
+		 * the pending io context count. If it hits zero,
+		 * complete ioc_gone and set it back to NULL.
+		 */
+		spin_lock(&ioc_gone_lock);
+		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+			complete(ioc_gone);
+			ioc_gone = NULL;
+		}
+		spin_unlock(&ioc_gone_lock);
+	}
 }
 
 static void as_trim(struct io_context *ioc)
@@ -450,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
 			del_timer(&ad->antic_timer);
 		ad->antic_status = ANTIC_FINISHED;
 		/* see as_work_handler */
-		kblockd_schedule_work(&ad->antic_work);
+		kblockd_schedule_work(ad->q, &ad->antic_work);
 	}
 }
 
@@ -471,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
 		aic = ad->io_context->aic;
 
 		ad->antic_status = ANTIC_FINISHED;
-		kblockd_schedule_work(&ad->antic_work);
+		kblockd_schedule_work(q, &ad->antic_work);
 
 		if (aic->ttime_samples == 0) {
 			/* process anticipated on has exited or timed out*/
@@ -733,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+#if 0	/* disable for now, we need to check tag level as well */
+	/*
+	 * SSD device without seek penalty, disable idling
+	 */
+	if (blk_queue_nonrot(ad->q))
+		return 0;
+#endif
+
 	if (!ad->io_context)
 		/*
 		 * Last request submitted was a write
@@ -825,13 +845,14 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
 	WARN_ON(!list_empty(&rq->queuelist));
 
 	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
-		printk("rq->state %d\n", RQ_STATE(rq));
-		WARN_ON(1);
+		WARN(1, "rq->state %d\n", RQ_STATE(rq));
 		goto out;
 	}
 
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
-		kblockd_schedule_work(&ad->antic_work);
+		ad->current_batch_expires = jiffies +
+					ad->batch_expire[ad->batch_data_dir];
+		kblockd_schedule_work(q, &ad->antic_work);
 		ad->changed_batch = 0;
 
 		if (ad->batch_data_dir == REQ_SYNC)
@@ -1268,22 +1289,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 	 */
 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
-			struct io_context *rioc = RQ_IOC(req);
-			struct io_context *nioc = RQ_IOC(next);
-
 			list_move(&req->queuelist, &next->queuelist);
 			rq_set_fifo_time(req, rq_fifo_time(next));
-			/*
-			 * Don't copy here but swap, because when anext is
-			 * removed below, it must contain the unused context
-			 */
-			if (rioc != nioc) {
-				double_spin_lock(&rioc->lock, &nioc->lock,
-							rioc < nioc);
-				swap_io_context(&rioc, &nioc);
-				double_spin_unlock(&rioc->lock, &nioc->lock,
-							rioc < nioc);
-			}
 		}
 	}
 
@@ -1505,7 +1512,7 @@ static void __exit as_exit(void)
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
 	if (elv_ioc_count_read(ioc_count))
-		wait_for_completion(ioc_gone);
+		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
 