cfq-iosched: Take care of corner cases of group losing share due to deletion
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3815f97..98b15b9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
         */
        struct cfq_queue oom_cfqq;
 
-       unsigned long last_end_sync_rq;
+       unsigned long last_delayed_sync;
 
        /* List of cfq groups being managed on this device*/
        struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
-       CFQ_CFQQ_FLAG_wait_busy_done,   /* Got new request. Expire the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        cfq_clear_cfqq_wait_request(cfqq);
        cfq_clear_cfqq_wait_busy(cfqq);
-       cfq_clear_cfqq_wait_busy_done(cfqq);
 
        /*
         * store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                return NULL;
 
        /*
+        * Don't search the priority tree if it's the only queue in the group.
+        */
+       if (cur_cfqq->cfqg->nr_cfqq == 1)
+               return NULL;
+
+       /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
@@ -2128,14 +2131,34 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 
        if (!cfqd->rq_queued)
                return NULL;
+
        /*
-        * The active queue has run out of time, expire it and select new.
+        * We were waiting for the group to get backlogged. Expire the queue.
         */
-       if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-            && !cfq_cfqq_must_dispatch(cfqq))
+       if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
                goto expire;
 
        /*
+        * The active queue has run out of time, expire it and select new.
+        */
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+               /*
+                * If the slice had not expired at the completion of the last
+                * request, we might not have turned on the wait_busy flag.
+                * Don't expire the queue yet. Allow the group to get backlogged.
+                *
+                * The very fact that we have used up the slice means we have
+                * been idling all along on this queue, and it should be ok to
+                * wait for this request to complete.
+                */
+               if (cfqq->cfqg->nr_cfqq == 1 && cfqq->dispatched
+                   && cfq_should_idle(cfqd, cfqq))
+                       goto keep_queue;
+               else
+                       goto expire;
+       }
+
+       /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
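
Taken together, the reworked select_queue() path first expires a wait-busy queue as soon as its group is backlogged again, and only then applies the slice-used test, with a carve-out for the sole queue of a group that still has I/O in flight. Below is a minimal userspace sketch of that decision order; the struct, field names, and the values in main() are illustrative stand-ins for the kernel state, not kernel API.

#include <stdbool.h>
#include <stdio.h>

enum decision { KEEP_QUEUE, EXPIRE_QUEUE };

struct q_state {
	bool wait_busy;     /* cfq_cfqq_wait_busy() */
	bool has_requests;  /* !RB_EMPTY_ROOT(&cfqq->sort_list) */
	bool slice_used;    /* cfq_slice_used() */
	bool must_dispatch; /* cfq_cfqq_must_dispatch() */
	bool sole_in_group; /* cfqq->cfqg->nr_cfqq == 1 */
	bool dispatched;    /* cfqq->dispatched != 0 */
	bool should_idle;   /* cfq_should_idle() */
};

static enum decision select_queue(const struct q_state *q)
{
	/* A request arrived while we were in wait-busy: the group is
	 * backlogged again, so expire and let it be requeued fairly. */
	if (q->wait_busy && q->has_requests)
		return EXPIRE_QUEUE;

	if (q->slice_used && !q->must_dispatch) {
		/* Sole queue of its group, I/O still in flight, and we
		 * would idle on it anyway: hold off so wait_busy can be
		 * set when the last request completes. */
		if (q->sole_in_group && q->dispatched && q->should_idle)
			return KEEP_QUEUE;
		return EXPIRE_QUEUE;
	}
	return KEEP_QUEUE;
}

int main(void)
{
	struct q_state q = { .slice_used = true, .sole_in_group = true,
			     .dispatched = true, .should_idle = true };
	printf("%s\n", select_queue(&q) == KEEP_QUEUE ? "keep" : "expire");
	return 0;
}
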
@@ -2264,7 +2287,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * based on the last sync IO we serviced
         */
        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-               unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+               unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
                unsigned int depth;
 
                depth = last_sync / cfqd->cfq_slice[1];
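
The rename from last_end_sync_rq to last_delayed_sync narrows what throttles async dispatch: as the completion-path hunk further down shows, only sync requests that actually waited longer than fifo_expire now push the timestamp forward. A small standalone sketch of the resulting depth arithmetic; the max-depth clamp and the concrete jiffy values are purely illustrative.

#include <stdio.h>

/* Allowed async queue depth grows with the time since a sync request
 * was last delayed, measured in units of the sync slice length. */
static unsigned int async_depth(unsigned long now,
				unsigned long last_delayed_sync,
				unsigned long slice_sync,
				unsigned int max_depth)
{
	unsigned int depth = (now - last_delayed_sync) / slice_sync;

	return depth < max_depth ? depth : max_depth;
}

int main(void)
{
	/* HZ=1000, 100ms sync slice: 250ms after the last delayed sync,
	 * async queues may keep 2 requests in flight. */
	printf("%u\n", async_depth(1250, 1000, 100, 4));

	/* Seeding last_delayed_sync = jiffies - HZ at init (see the
	 * cfq_init_queue hunk below) gives a fresh queue a second of
	 * headroom instead of starting fully throttled. */
	printf("%u\n", async_depth(0, 0UL - 1000UL, 100, 4));
	return 0;
}
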
@@ -2368,7 +2391,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
        struct cfq_data *cfqd = cfqq->cfqd;
-       struct cfq_group *cfqg;
+       struct cfq_group *cfqg, *orig_cfqg;
 
        BUG_ON(atomic_read(&cfqq->ref) <= 0);
 
@@ -2379,6 +2402,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        BUG_ON(rb_first(&cfqq->sort_list));
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        cfqg = cfqq->cfqg;
+       orig_cfqg = cfqq->orig_cfqg;
 
        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
@@ -2388,8 +2412,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        kmem_cache_free(cfq_pool, cfqq);
        cfq_put_cfqg(cfqg);
-       if (cfqq->orig_cfqg)
-               cfq_put_cfqg(cfqq->orig_cfqg);
+       if (orig_cfqg)
+               cfq_put_cfqg(orig_cfqg);
 }
 
 /*
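
The cfq_put_queue() hunk above is a use-after-free fix: the old code freed cfqq with kmem_cache_free() and then read cfqq->orig_cfqg. Caching the pointer in a local before the free is the standard pattern; a minimal sketch with illustrative types follows.

#include <stdlib.h>

struct group { int ref; };
struct queue { struct group *grp, *orig_grp; };

static void put_group(struct group *g)
{
	if (g && --g->ref == 0)
		free(g);
}

static void put_queue(struct queue *q)
{
	struct group *grp = q->grp;
	struct group *orig_grp = q->orig_grp;	/* read before free(q) */

	free(q);		/* q must not be touched past this point */
	put_group(grp);
	if (orig_grp)		/* use the cached copy, not q->orig_grp */
		put_group(orig_grp);
}

int main(void)
{
	struct group *g = calloc(1, sizeof(*g));
	struct queue *q = calloc(1, sizeof(*q));

	g->ref = 1;
	q->grp = g;		/* orig_grp left NULL, as after a move */
	put_queue(q);
	return 0;
}
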
@@ -3164,10 +3188,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
        if (cfqq == cfqd->active_queue) {
-               if (cfq_cfqq_wait_busy(cfqq)) {
-                       cfq_clear_cfqq_wait_busy(cfqq);
-                       cfq_mark_cfqq_wait_busy_done(cfqq);
-               }
                /*
                 * Remember that we saw a request from this process, but
                 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3250,6 +3270,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
                cfqd->hw_tag = 0;
 }
 
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct cfq_io_context *cic = cfqd->active_cic;
+
+       /* If there are other queues in the group, don't wait */
+       if (cfqq->cfqg->nr_cfqq > 1)
+               return false;
+
+       if (cfq_slice_used(cfqq))
+               return true;
+
+       /* If the remaining slice is shorter than the think time, wait busy */
+       if (cic && sample_valid(cic->ttime_samples)
+           && (cfqq->slice_end - jiffies < cic->ttime_mean))
+               return true;
+
+       /*
+        * If the think time is less than a jiffy, then ttime_mean=0 and the
+        * check above will not be true. It might happen that the slice has not
+        * expired yet but will expire soon (4-5 ns) during select_queue(). To
+        * cover the case where the think time is less than a jiffy, mark the
+        * queue wait busy if only 1 jiffy is left in the slice.
+        */
+       if (cfqq->slice_end - jiffies == 1)
+               return true;
+
+       return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3272,7 +3321,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
        if (sync) {
                RQ_CIC(rq)->last_end_request = now;
-               cfqd->last_end_sync_rq = now;
+               if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+                       cfqd->last_delayed_sync = now;
        }
 
        /*
@@ -3288,11 +3338,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                }
 
                /*
-                * If this queue consumed its slice and this is last queue
-                * in the group, wait for next request before we expire
-                * the queue
+                * Should we wait for the next request to come in before we
+                * expire the queue?
                 */
-               if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+               if (cfq_should_wait_busy(cfqd, cfqq)) {
                        cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
                        cfq_mark_cfqq_wait_busy(cfqq);
                }
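
cfq_should_wait_busy() generalizes the old "slice used and last queue in the group" test with two think-time cases. A standalone rendering of the same three checks in plain jiffy arithmetic; all names and the sample values below are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool should_wait_busy(int queues_in_group, bool slice_used,
			     bool ttime_valid, long ttime_mean,
			     long slice_end, long now)
{
	/* Other queues keep the group backlogged; no need to wait. */
	if (queues_in_group > 1)
		return false;

	/* Slice fully used: wait for one more request. */
	if (slice_used)
		return true;

	/* Remaining slice shorter than the task's mean think time. */
	if (ttime_valid && slice_end - now < ttime_mean)
		return true;

	/* Sub-jiffy think times give ttime_mean == 0, so the check
	 * above never fires; catch the final jiffy of the slice. */
	if (slice_end - now == 1)
		return true;

	return false;
}

int main(void)
{
	/* 2 jiffies of slice left, 4 jiffies of mean think time. */
	printf("%d\n", should_wait_busy(1, false, true, 4, 1010, 1008));
	return 0;
}
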
@@ -3710,7 +3759,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
-       cfqd->last_end_sync_rq = jiffies;
+       cfqd->last_delayed_sync = jiffies - HZ;
        INIT_RCU_HEAD(&cfqd->rcu);
        return cfqd;
 }