block: hold extra reference to bio in blk_rq_map_user_iov()
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9603684..71f0abb 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -151,6 +151,7 @@ enum arq_state {
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
 static void as_antic_stop(struct as_data *ad);
@@ -164,17 +165,28 @@ static void free_as_io_context(struct as_io_context *aic)
 {
        kfree(aic);
        elv_ioc_count_dec(ioc_count);
-       if (ioc_gone && !elv_ioc_count_read(ioc_count))
-               complete(ioc_gone);
+       if (ioc_gone) {
+               /*
+                * AS scheduler is exiting, grab exit lock and check
+                * the pending io context count. If it hits zero,
+                * complete ioc_gone and set it back to NULL.
+                */
+               spin_lock(&ioc_gone_lock);
+               if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+                       complete(ioc_gone);
+                       ioc_gone = NULL;
+               }
+               spin_unlock(&ioc_gone_lock);
+       }
 }
 
 static void as_trim(struct io_context *ioc)
 {
-       spin_lock(&ioc->lock);
+       spin_lock_irq(&ioc->lock);
        if (ioc->aic)
                free_as_io_context(ioc->aic);
        ioc->aic = NULL;
-       spin_unlock(&ioc->lock);
+       spin_unlock_irq(&ioc->lock);
 }
 
 /* Called when the task exits */
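The free_as_io_context() change above is the core of the fix: the old unlocked test let two CPUs freeing the last contexts race, either calling complete() on ioc_gone twice or missing the wakeup entirely. The new code peeks at ioc_gone without the lock as a fast path, then revalidates both the pointer and the count under ioc_gone_lock, and NULLs the pointer so the completion fires exactly once. A minimal sketch of the pattern, with illustrative names (exit_lock, exit_done, pending) standing in for ioc_gone_lock, ioc_gone and the per-cpu ioc_count:

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/atomic.h>

static DEFINE_SPINLOCK(exit_lock);		/* plays ioc_gone_lock */
static struct completion *exit_done;		/* plays ioc_gone */
static atomic_t pending = ATOMIC_INIT(0);	/* plays ioc_count */

static void release_one(void)
{
	atomic_dec(&pending);
	if (exit_done) {
		/*
		 * The unlocked peek above is only a fast path; the
		 * authoritative test runs under exit_lock, so exactly
		 * one CPU sees pending == 0 with exit_done still set.
		 */
		spin_lock(&exit_lock);
		if (exit_done && !atomic_read(&pending)) {
			complete(exit_done);
			exit_done = NULL;	/* never complete twice */
		}
		spin_unlock(&exit_lock);
	}
}

The unlocked check stays safe precisely because it is only an optimization: a stale read merely sends one caller through the locked slow path for nothing.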
@@ -235,10 +247,12 @@ static void as_put_io_context(struct request *rq)
        aic = RQ_IOC(rq)->aic;
 
        if (rq_is_sync(rq) && aic) {
-               spin_lock(&aic->lock);
+               unsigned long flags;
+
+               spin_lock_irqsave(&aic->lock, flags);
                set_bit(AS_TASK_IORUNNING, &aic->state);
                aic->last_end_request = jiffies;
-               spin_unlock(&aic->lock);
+               spin_unlock_irqrestore(&aic->lock, flags);
        }
 
        put_io_context(RQ_IOC(rq));
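The locking changes in as_trim() and as_put_io_context() are about interrupt context: these locks can now be taken from the request completion path, so a process-context holder must disable local interrupts or a completion arriving mid-critical-section self-deadlocks on the same lock. as_trim() runs in task context with interrupts known to be on, so the cheaper _irq pair is enough; as_put_io_context() cannot assume its caller's irq state and uses _irqsave. A sketch of the convention, with hypothetical function names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);	/* taken from task and irq context */

/* Task context, interrupts known to be on: _irq variants suffice. */
static void touch_from_task(void)
{
	spin_lock_irq(&shared_lock);
	/* ... critical section ... */
	spin_unlock_irq(&shared_lock);
}

/* Caller's irq state unknown (may already be off): save and restore. */
static void touch_from_anywhere(void)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&shared_lock, flags);
}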
@@ -448,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
                        del_timer(&ad->antic_timer);
                ad->antic_status = ANTIC_FINISHED;
                /* see as_work_handler */
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(ad->q, &ad->antic_work);
        }
 }
 
@@ -469,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
                aic = ad->io_context->aic;
 
                ad->antic_status = ANTIC_FINISHED;
-               kblockd_schedule_work(&ad->antic_work);
+               kblockd_schedule_work(q, &ad->antic_work);
 
                if (aic->ttime_samples == 0) {
                        /* process anticipated on has exited or timed out*/
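The two kblockd hunks track an API change rather than a bug: kblockd_schedule_work() now takes the request queue as its first parameter, so the block layer knows which queue the work belongs to, and callers simply pass the queue they already hold. A sketch of a call under that signature (the helper name is illustrative):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

/* Illustrative caller: q is the queue the elevator already owns.
 * kblockd_schedule_work() returns nonzero if the work was queued,
 * zero if it was already pending.
 */
static void kick_antic_work(struct request_queue *q,
			    struct work_struct *work)
{
	kblockd_schedule_work(q, work);
}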
@@ -731,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+#if 0 /* disable for now, we need to check tag level as well */
+       /*
+        * SSD device without seek penalty, disable idling
+        */
+       if (blk_queue_nonrot(ad->q))
+               return 0;
+#endif
+
        if (!ad->io_context)
                /*
                 * Last request submitted was a write
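The #if 0 block documents an intended heuristic: a non-rotational device (SSD) pays no seek penalty, so anticipatory idling is pure loss there. It is left disabled because, as the comment says, the decision should also weigh how many tagged commands the device already has in flight. A speculative sketch of the shape such a check could take; driver_tags_in_flight() is invented here for illustration and does not exist in AS:

/* Speculative only: AS never shipped this check. blk_queue_nonrot()
 * is real; driver_tags_in_flight() is a made-up stand-in for the
 * missing tag-level accounting.
 */
static int worth_idling(struct as_data *ad)
{
	if (blk_queue_nonrot(ad->q) && driver_tags_in_flight(ad->q) > 1)
		return 0;	/* SSD is keeping itself busy: don't idle */
	return 1;
}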
@@ -823,13 +845,14 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!list_empty(&rq->queuelist));
 
        if (RQ_STATE(rq) != AS_RQ_REMOVED) {
-               printk("rq->state %d\n", RQ_STATE(rq));
-               WARN_ON(1);
+               WARN(1, "rq->state %d\n", RQ_STATE(rq));
                goto out;
        }
 
        if (ad->changed_batch && ad->nr_dispatched == 1) {
-               kblockd_schedule_work(&ad->antic_work);
+               ad->current_batch_expires = jiffies +
+                                       ad->batch_expire[ad->batch_data_dir];
+               kblockd_schedule_work(q, &ad->antic_work);
                ad->changed_batch = 0;
 
                if (ad->batch_data_dir == REQ_SYNC)
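Besides folding the printk/WARN_ON pair into a single WARN(), the as_completed_request() hunk fixes a stale-clock bug: when a batch direction switch is committed here, current_batch_expires must be restarted from the present jiffies, otherwise the new batch inherits the old batch's timestamp and can be treated as expired immediately. The bookkeeping is ordinary jiffies arithmetic, sketched with illustrative helpers:

#include <linux/jiffies.h>

/* Illustrative helpers: restart the clock when the new batch begins,
 * then test it with the wraparound-safe jiffies comparators.
 */
static unsigned long batch_deadline(unsigned long batch_expire)
{
	return jiffies + batch_expire;
}

static int batch_expired(unsigned long deadline)
{
	return time_after(jiffies, deadline);
}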
@@ -1266,22 +1289,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
-                       struct io_context *rioc = RQ_IOC(req);
-                       struct io_context *nioc = RQ_IOC(next);
-
                        list_move(&req->queuelist, &next->queuelist);
                        rq_set_fifo_time(req, rq_fifo_time(next));
-                       /*
-                        * Don't copy here but swap, because when anext is
-                        * removed below, it must contain the unused context
-                        */
-                       if (rioc != nioc) {
-                               double_spin_lock(&rioc->lock, &nioc->lock,
-                                                               rioc < nioc);
-                               swap_io_context(&rioc, &nioc);
-                               double_spin_unlock(&rioc->lock, &nioc->lock,
-                                                               rioc < nioc);
-                       }
                }
        }
 
@@ -1503,7 +1512,7 @@ static void __exit as_exit(void)
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
-               wait_for_completion(ioc_gone);
+               wait_for_completion(&all_gone);
        synchronize_rcu();
 }
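Finally, the exit side of the ioc_gone handshake: all_gone lives on as_exit()'s stack and ioc_gone merely points at it. Because free_as_io_context() now NULLs ioc_gone after completing it, the waiter must not re-read the shared pointer, hence wait_for_completion(&all_gone) instead of wait_for_completion(ioc_gone). A sketch of that exit path, reusing the names from the release-side sketch above:

/* Pairs with release_one() in the earlier sketch; exit_done and
 * pending are the globals declared there.
 */
static void module_teardown(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	exit_done = &all_gone;
	/* make exit_done visible before sampling the count */
	smp_wmb();
	if (atomic_read(&pending))
		/* wait on the stack copy: exit_done may be NULL by now */
		wait_for_completion(&all_gone);
}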