diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a34686f..069a610 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
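
The rename gives the per-CPU symbol a driver-specific prefix: static
per-CPU variables share one global symbol namespace, so a generic name
like ioc_count can collide with another elevator's counter of the same
name. A minimal sketch of the counting scheme, assuming the
elv_ioc_count_* helpers sum per-CPU slots (cfq_ioc_count_read() below
is an illustrative stand-in, not the real elevator.h macro):

	static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);

	static unsigned long cfq_ioc_count_read(void)
	{
		unsigned long total = 0;
		int cpu;

		/* increments stay CPU-local and cheap; the rare
		 * read pays the cost of summing every CPU's slot */
		for_each_possible_cpu(cpu)
			total += per_cpu(cfq_ioc_count, cpu);
		return total;
	}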
@@ -173,6 +173,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_latency;
 
        struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;
+
+       unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
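
The new last_end_sync_rq timestamp ties two hunks below together: sync
completions stamp it in cfq_completed_request(), and cfq_may_dispatch()
reads it to ramp up the async dispatch depth. In short:

	/* cfq_completed_request(), on a sync request: */
	cfqd->last_end_sync_rq = jiffies;

	/* cfq_may_dispatch(), when sizing async dispatch: */
	depth = (jiffies - cfqd->last_end_sync_rq) / cfqd->cfq_slice[1];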
@@ -227,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                                       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                                                struct io_context *);
@@ -238,27 +241,24 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-                                           int is_sync)
+                                           bool is_sync)
 {
-       return cic->cfqq[!!is_sync];
+       return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-                               struct cfq_queue *cfqq, int is_sync)
+                               struct cfq_queue *cfqq, bool is_sync)
 {
-       cic->cfqq[!!is_sync] = cfqq;
+       cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-       if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-               return 1;
-
-       return 0;
+       return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
@@ -285,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
 {
        const int base_slice = cfqd->cfq_slice[sync];
@@ -313,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
        if (cfq_cfqq_slice_new(cfqq))
                return 0;
@@ -488,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                                int add_front)
+                                bool add_front)
 {
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
@@ -504,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                } else
                        rb_key += jiffies;
        } else if (!add_front) {
+               /*
+                * Get our rb key offset. Subtract any residual slice
+                * value carried from last service. A negative resid
+                * count indicates slice overrun, and this should position
+                * the next service time further away in the tree.
+                */
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-               rb_key += cfqq->slice_resid;
+               rb_key -= cfqq->slice_resid;
                cfqq->slice_resid = 0;
-       } else
-               rb_key = 0;
+       } else {
+               rb_key = -HZ;
+               __cfqq = cfq_rb_first(&cfqd->service_tree);
+               rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+       }
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                /*
@@ -542,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        n = &(*p)->rb_left;
                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                        n = &(*p)->rb_right;
-               else if (rb_key < __cfqq->rb_key)
+               else if (time_before(rb_key, __cfqq->rb_key))
                        n = &(*p)->rb_left;
                else
                        n = &(*p)->rb_right;
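
Two related ordering fixes here: the add_front case now derives its key
from the current tree head (first rb_key minus HZ) instead of a constant
0, and the insertion comparison switches to time_before(), which stays
correct when the jiffies-based keys wrap. A minimal sketch of why a raw
'<' is not wrap-safe:

	unsigned long a = ULONG_MAX - 5;	/* key taken just before the wrap */
	unsigned long b = a + 10;		/* 10 ticks later; wraps to 4 */

	/* numerically b < a, yet b is later in time */
	WARN_ON(!time_before(a, b));		/* time_before() compares via
						 * signed difference: wrap-safe */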
@@ -822,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(next->start_time, rq->start_time))
+           time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
                list_move(&rq->queuelist, &next->queuelist);
+               rq_set_fifo_time(rq, rq_fifo_time(next));
+       }
 
        cfq_remove_request(next);
 }
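
The fifo list is kept sorted by expiry time, so simply moving rq into
next's (older) slot is no longer enough: rq must also inherit next's
earlier deadline, or it would sit ahead of requests that expire sooner.
Hence the paired operations:

	list_move(&rq->queuelist, &next->queuelist);
	rq_set_fifo_time(rq, rq_fifo_time(next));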
@@ -839,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         * Disallow merge of a sync bio into an async request.
         */
        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-               return 0;
+               return false;
 
        /*
         * Lookup the cfqq that this bio will be queued with. Allow
@@ -847,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
         */
        cic = cfq_cic_lookup(cfqd, current->io_context);
        if (!cic)
-               return 0;
+               return false;
 
        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-       if (cfqq == RQ_CFQQ(rq))
-               return 1;
-
-       return 0;
+       return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -881,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                   int timed_out)
+                   bool timed_out)
 {
        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -909,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        }
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1021,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                                              struct cfq_queue *cur_cfqq,
-                                             int probe)
+                                             bool probe)
 {
        struct cfq_queue *cfqq;
 
@@ -1085,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                return;
 
+       /*
+        * If our average think time is larger than the remaining time
+        * slice, then don't idle. This avoids overrunning the allotted
+        * time slice.
+        */
+       if (sample_valid(cic->ttime_samples) &&
+           (cfqq->slice_end - jiffies < cic->ttime_mean))
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
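
A worked example of the new think-time check, assuming HZ=1000: if the
slice has 4ms left (slice_end - jiffies == 4) but the task's observed
mean think time is 7ms, arming the idle timer would overrun the slice
by roughly 3ms, so CFQ skips idling and lets the queue switch happen:

	/* illustrative values */
	unsigned long remaining = cfqq->slice_end - jiffies;	/* 4 */
	if (sample_valid(cic->ttime_samples) &&
	    remaining < cic->ttime_mean)			/* 4 < 7 */
		return;						/* don't idle */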
@@ -1110,6 +1127,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
 
+       cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
        cfq_remove_request(rq);
        cfqq->dispatched++;
        elv_dispatch_sort(q, rq);
@@ -1123,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *rq;
-       int fifo;
+       struct request *rq = NULL;
 
        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;
@@ -1135,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
        if (list_empty(&cfqq->fifo))
                return NULL;
 
-       fifo = cfq_cfqq_sync(cfqq);
        rq = rq_entry_fifo(cfqq->fifo.next);
-
-       if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+       if (time_before(jiffies, rq_fifo_time(rq)))
                rq = NULL;
 
-       cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
        return rq;
 }
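
cfq_check_fifo() now tests an absolute expiry stamped when the request
was queued (see the cfq_insert_request() hunk below), instead of
recomputing start_time + cfq_fifo_expire[sync] on every check. The two
halves of the pattern:

	/* at insert time (cfq_insert_request) */
	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);

	/* at dispatch time (cfq_check_fifo): not expired yet? */
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;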
 
@@ -1242,16 +1256,83 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        return dispatched;
 }
 
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       unsigned int max_dispatch;
+
+       /*
+        * Drain async requests before we start sync IO
+        */
+       if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+               return false;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return false;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
+
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * idle queue must always only have a single IO in flight
+                */
+               if (cfq_class_idle(cfqq))
+                       return false;
+
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return false;
+
+               /*
+                * Sole queue user, allow bigger slice
+                */
+               max_dispatch *= 4;
+       }
+
+       /*
+        * Async queues must wait a bit before being allowed dispatch.
+        * We also ramp up the dispatch depth gradually for async IO,
+        * based on the last sync IO we serviced
+        */
+       if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+               unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+               unsigned int depth;
+
+               depth = last_sync / cfqd->cfq_slice[1];
+               if (!depth && !cfqq->dispatched)
+                       depth = 1;
+               if (depth < max_dispatch)
+                       max_dispatch = depth;
+       }
+
+       /*
+        * If we're below the current max, allow a dispatch
+        */
+       return cfqq->dispatched < max_dispatch;
+}
+
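
A worked example of the async ramp above, assuming HZ=1000 and the
default sync slice cfq_slice[1] of 100ms: if the last sync request
completed 250ms ago,

	depth = last_sync / cfqd->cfq_slice[1];	/* 250 / 100 = 2 */

so at most two async requests may be in flight. Right after a sync
completion depth computes to 0, but one request is still allowed when
the queue has nothing dispatched, so async IO is throttled rather than
starved; with cfq_latency cleared the ramp is bypassed entirely.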
 /*
  * Dispatch a request from cfqq, moving them to the request queue
  * dispatch list.
  */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        struct request *rq;
 
        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
+       if (!cfq_may_dispatch(cfqd, cfqq))
+               return false;
+
        /*
         * follow expired path, else get first next available
         */
@@ -1270,6 +1351,8 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                atomic_long_inc(&cic->ioc->refcount);
                cfqd->active_cic = cic;
        }
+
+       return true;
 }
 
 /*
@@ -1280,7 +1363,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1293,48 +1375,11 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                return 0;
 
        /*
-        * Drain async requests before we start sync IO
+        * Dispatch a request from this cfqq, if it is allowed
         */
-       if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+       if (!cfq_dispatch_request(cfqd, cfqq))
                return 0;
 
-       /*
-        * If this is an async queue and we have sync IO in flight, let it wait
-        */
-       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-               return 0;
-
-       max_dispatch = cfqd->cfq_quantum;
-       if (cfq_class_idle(cfqq))
-               max_dispatch = 1;
-
-       /*
-        * Does this cfqq already have too much IO in flight?
-        */
-       if (cfqq->dispatched >= max_dispatch) {
-               /*
-                * idle queue must always only have a single IO in flight
-                */
-               if (cfq_class_idle(cfqq))
-                       return 0;
-
-               /*
-                * We have other queues, don't allow more IO from this one
-                */
-               if (cfqd->busy_queues > 1)
-                       return 0;
-
-               /*
-                * we are the only queue, allow up to 4 times of 'quantum'
-                */
-               if (cfqq->dispatched >= 4 * max_dispatch)
-                       return 0;
-       }
-
-       /*
-        * Dispatch a request from this cfqq
-        */
-       cfq_dispatch_request(cfqd, cfqq);
        cfqq->slice_dispatch++;
        cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1414,7 +1459,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
        cic = container_of(head, struct cfq_io_context, rcu_head);
 
        kmem_cache_free(cfq_ioc_pool, cic);
-       elv_ioc_count_dec(ioc_count);
+       elv_ioc_count_dec(cfq_ioc_count);
 
        if (ioc_gone) {
                /*
@@ -1423,7 +1468,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
                 * complete ioc_gone and set it back to NULL
                 */
                spin_lock(&ioc_gone_lock);
-               if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+               if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
                        complete(ioc_gone);
                        ioc_gone = NULL;
                }
@@ -1549,7 +1594,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                INIT_HLIST_NODE(&cic->cic_list);
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
-               elv_ioc_count_inc(ioc_count);
+               elv_ioc_count_inc(cfq_ioc_count);
        }
 
        return cic;
@@ -1634,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                         pid_t pid, int is_sync)
+                         pid_t pid, bool is_sync)
 {
        RB_CLEAR_NODE(&cfqq->rb_node);
        RB_CLEAR_NODE(&cfqq->p_node);
@@ -1654,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
                     struct io_context *ioc, gfp_t gfp_mask)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1718,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
              gfp_t gfp_mask)
 {
        const int ioprio = task_ioprio(ioc);
@@ -1950,10 +1995,13 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-           (cfqd->hw_tag && CIC_SEEKY(cic)))
+           (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
-               if (cic->ttime_mean > cfqd->cfq_slice_idle)
+               unsigned int slice_idle = cfqd->cfq_slice_idle;
+               if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+                       slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+               if (cic->ttime_mean > slice_idle)
                        enable_idle = 0;
                else
                        enable_idle = 1;
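
Net effect of this hunk: with cfq_latency set, a seeky task on a
command-queuing disk is no longer stripped of its idle window outright;
it must instead beat a much shorter threshold. Assuming this file's
CFQ_MIN_TT of 2 (taken as 2ms here) against the default cfq_slice_idle
of HZ/125 from the first hunk (8ms):

	unsigned int slice_idle = cfqd->cfq_slice_idle;		/* 8ms */
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		slice_idle = msecs_to_jiffies(CFQ_MIN_TT);	/* 2ms */
	/* idle only if the mean think time fits in that window */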
@@ -1972,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct request *rq)
 {
@@ -1980,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
        cfqq = cfqd->active_queue;
        if (!cfqq)
-               return 0;
+               return false;
 
        if (cfq_slice_used(cfqq))
-               return 1;
+               return true;
 
        if (cfq_class_idle(new_cfqq))
-               return 0;
+               return false;
 
        if (cfq_class_idle(cfqq))
-               return 1;
+               return true;
 
        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-               return 1;
+               return true;
 
        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
        if (rq_is_meta(rq) && !cfqq->meta_pending)
-               return 1;
+               return true;
 
        /*
         * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
         */
        if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-               return 1;
+               return true;
 
        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-               return 0;
+               return false;
 
        /*
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
        if (cfq_rq_close(cfqd, rq))
-               return 1;
+               return true;
 
-       return 0;
+       return false;
 }
 
 /*
@@ -2106,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
        cfq_add_rq_rb(rq);
 
+       rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
        list_add_tail(&rq->queuelist, &cfqq->fifo);
 
        cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2156,8 +2205,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;
 
-       if (sync)
+       if (sync) {
                RQ_CIC(rq)->last_end_request = now;
+               cfqd->last_end_sync_rq = now;
+       }
 
        /*
         * If this is the active queue, check if it needs to be expired,
@@ -2283,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
-       const int is_sync = rq_is_sync(rq);
+       const bool is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
        unsigned long flags;
 
@@ -2479,8 +2530,9 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_latency = 1;
        cfqd->hw_tag = 1;
-
+       cfqd->last_end_sync_rq = jiffies;
        return cfqd;
 }
 
@@ -2548,6 +2600,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -2579,6 +2632,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2594,6 +2648,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(low_latency),
        __ATTR_NULL
 };
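
The new tunable is exposed with the other CFQ attributes under the
elevator's sysfs directory, e.g. (device name illustrative):

	# cat /sys/block/sda/queue/iosched/low_latency
	1
	# echo 0 > /sys/block/sda/queue/iosched/low_latency

It defaults to on (cfq_latency = 1 in cfq_init_queue() above).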
 
@@ -2653,7 +2708,7 @@ static void __exit cfq_exit(void)
         * this also protects us from entering cfq_slab_kill() with
         * pending RCU callbacks
         */
-       if (elv_ioc_count_read(ioc_count))
+       if (elv_ioc_count_read(cfq_ioc_count))
                wait_for_completion(&all_gone);
        cfq_slab_kill();
 }