diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 03a5953..0d3b70d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -56,9 +56,6 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
 #define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
-#define ASYNC                  (0)
-#define SYNC                   (1)
-
 #define sample_valid(samples)  ((samples) > 80)
 
 /*
@@ -83,7 +80,20 @@ struct cfq_data {
         * rr list of queues with requests and the count of them
         */
        struct cfq_rb_root service_tree;
+
+       /*
+        * Each priority tree is sorted by next_request position.  These
+        * trees are used when determining if two or more queues are
+        * interleaving requests (see cfq_close_cooperator).
+        */
+       struct rb_root prio_trees[CFQ_PRIO_LISTS];
+
        unsigned int busy_queues;
+       /*
+        * Used to track any pending RT requests, so we can preempt the
+        * current non-RT cfqq in service when this value is non-zero.
+        */
+       unsigned int busy_rt_queues;
 
        int rq_in_driver;
        int sync_flight;
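
For context, the interleaving these prio trees are meant to catch is two or
more processes whose reads are sequential only when taken together, e.g. a
thread pool splitting one scan. A minimal userspace sketch of such a workload
(file name and block counts are made up):

#define _XOPEN_SOURCE 500	/* for pread() */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("datafile", O_RDONLY);	/* hypothetical input */
	long i, first = (fork() == 0) ? 0 : 1;	/* child: even, parent: odd */

	if (fd < 0)
		return 1;
	/*
	 * Each process alone looks seeky (8k stride), but the pair's next
	 * requests always sit right next to cfqd->last_position, which is
	 * what cfq_close_cooperator() below keys on.
	 */
	for (i = first; i < 1024; i += 2)
		if (pread(fd, buf, sizeof(buf), i * 4096) <= 0)
			break;
	close(fd);
	return 0;
}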
@@ -142,6 +152,8 @@ struct cfq_queue {
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
+       /* prio tree member */
+       struct rb_node p_node;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
@@ -155,6 +167,7 @@ struct cfq_queue {
 
        unsigned long slice_end;
        long slice_resid;
+       unsigned int slice_dispatch;
 
        /* pending metadata requests */
        int meta_pending;
@@ -171,15 +184,15 @@ struct cfq_queue {
 enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
+       CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
-       CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
-       CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
+       CFQ_CFQQ_FLAG_coop,             /* has jumped the queue as a close cooperator */
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -198,15 +211,15 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)           \
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
@@ -415,13 +428,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
        return NULL;
 }
 
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+
 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 {
        if (root->left == n)
                root->left = NULL;
-
-       rb_erase(n, &root->rb);
-       RB_CLEAR_NODE(n);
+       rb_erase_init(n, &root->rb);
 }
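
The point of the new helper is that RB_CLEAR_NODE() points the node's parent
pointer back at itself, which is exactly the state RB_EMPTY_NODE() tests for;
"is this cfqq linked on a tree?" becomes a cheap local check. A hypothetical
helper showing the intended pattern (the real call sites follow in
cfq_del_cfqq_rr()):

static void cfq_prio_tree_del(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/* only erase if the node is actually linked on a tree */
	if (!RB_EMPTY_NODE(&cfqq->p_node))
		rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);
}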
 
 /*
@@ -466,8 +483,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
-static void cfq_service_tree_add(struct cfq_data *cfqd,
-                                   struct cfq_queue *cfqq, int add_front)
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                                int add_front)
 {
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
@@ -540,6 +557,63 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
+static struct cfq_queue *
+cfq_prio_tree_lookup(struct cfq_data *cfqd, int ioprio, sector_t sector,
+                    struct rb_node **ret_parent, struct rb_node ***rb_link)
+{
+       struct rb_root *root = &cfqd->prio_trees[ioprio];
+       struct rb_node **p, *parent;
+       struct cfq_queue *cfqq = NULL;
+
+       parent = NULL;
+       p = &root->rb_node;
+       while (*p) {
+               struct rb_node **n;
+
+               parent = *p;
+               cfqq = rb_entry(parent, struct cfq_queue, p_node);
+
+               /*
+                * Sort strictly based on sector.  Smallest to the left,
+                * largest to the right.
+                */
+               if (sector > cfqq->next_rq->sector)
+                       n = &(*p)->rb_right;
+               else if (sector < cfqq->next_rq->sector)
+                       n = &(*p)->rb_left;
+               else
+                       break;
+               p = n;
+               /* keep descending: no exact match at this node */
+               cfqq = NULL;
+       }
+
+       *ret_parent = parent;
+       if (rb_link)
+               *rb_link = p;
+       return cfqq;
+}
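
With the return value fixed as above, the lookup's contract is: a non-NULL
return is an exact sector hit; on a miss, *ret_parent is the last node
visited and, when rb_link is given, **rb_link is the empty child slot an
insert would use. A hypothetical caller, for illustration only:

static int cfq_sector_has_queue(struct cfq_data *cfqd, int prio,
				sector_t sector)
{
	struct rb_node *parent;

	/* exact-match probe; the insertion link is not needed here */
	return cfq_prio_tree_lookup(cfqd, prio, sector, &parent, NULL) != NULL;
}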
+
+static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct rb_root *root = &cfqd->prio_trees[cfqq->ioprio];
+       struct rb_node **p, *parent;
+       struct cfq_queue *__cfqq;
+
+       if (!RB_EMPTY_NODE(&cfqq->p_node))
+               rb_erase_init(&cfqq->p_node, root);
+
+       if (cfq_class_idle(cfqq))
+               return;
+       if (!cfqq->next_rq)
+               return;
+
+       __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->ioprio, cfqq->next_rq->sector,
+                                     &parent, &p);
+       /*
+        * If another queue's next_rq already sits at this exact sector,
+        * don't link a second node into the same slot; that would
+        * corrupt the rbtree. The existing entry serves position
+        * lookups just as well.
+        */
+       if (__cfqq)
+               return;
+
+       rb_link_node(&cfqq->p_node, parent, p);
+       rb_insert_color(&cfqq->p_node, root);
+}
+
 /*
  * Update cfqq's position in the service tree.
  */
@@ -548,8 +622,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        /*
         * Resorting requires the cfqq to be on the RR list already.
         */
-       if (cfq_cfqq_on_rr(cfqq))
+       if (cfq_cfqq_on_rr(cfqq)) {
                cfq_service_tree_add(cfqd, cfqq, 0);
+               cfq_prio_tree_add(cfqd, cfqq);
+       }
 }
 
 /*
@@ -562,6 +638,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues++;
 
        cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -578,9 +656,13 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node))
                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+       if (!RB_EMPTY_NODE(&cfqq->p_node))
+               rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);
 
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues--;
 }
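
busy_rt_queues is only ever adjusted next to the on_rr mark/clear in
cfq_add_cfqq_rr() and cfq_del_cfqq_rr() above, so it can never exceed
busy_queues. A hypothetical sanity check, if one wanted that invariant
enforced:

static void cfq_check_busy_accounting(struct cfq_data *cfqd)
{
	/* every busy RT queue is also counted in busy_queues */
	BUG_ON(cfqd->busy_rt_queues > cfqd->busy_queues);
}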
 
 /*
@@ -605,7 +687,7 @@ static void cfq_add_rq_rb(struct request *rq)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *__alias;
+       struct request *__alias, *prev;
 
        cfqq->queued[rq_is_sync(rq)]++;
 
@@ -622,7 +704,15 @@ static void cfq_add_rq_rb(struct request *rq)
        /*
         * check if this request is a better next-serve candidate
         */
+       prev = cfqq->next_rq;
        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+
+       /*
+        * adjust priority tree position, if ->next_rq changes
+        */
+       if (prev != cfqq->next_rq)
+               cfq_prio_tree_add(cfqd, cfqq);
+
        BUG_ON(!cfqq->next_rq);
 }
 
@@ -765,10 +855,15 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_end = 0;
+               cfqq->slice_dispatch = 0;
+
+               cfq_clear_cfqq_wait_request(cfqq);
+               cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
-               cfq_clear_cfqq_queue_new(cfqq);
+
+               del_timer(&cfqd->idle_slice_timer);
        }
 
        cfqd->active_queue = cfqq;
@@ -786,7 +881,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);
 
-       cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);
 
        /*
@@ -831,11 +925,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 /*
  * Get and set a new active queue for service.
  */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+                                             struct cfq_queue *cfqq)
 {
-       struct cfq_queue *cfqq;
+       if (!cfqq) {
+               cfqq = cfq_get_next_queue(cfqd);
+               if (cfqq)
+                       cfq_clear_cfqq_coop(cfqq);
+       }
 
-       cfqq = cfq_get_next_queue(cfqd);
        __cfq_set_active_queue(cfqd, cfqq);
        return cfqq;
 }
@@ -859,17 +957,89 @@ static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
        return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
 }
 
-static int cfq_close_cooperator(struct cfq_data *cfq_data,
-                               struct cfq_queue *cfqq)
+static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
+                                   struct cfq_queue *cur_cfqq)
 {
+       struct rb_root *root = &cfqd->prio_trees[cur_cfqq->ioprio];
+       struct rb_node *parent, *node;
+       struct cfq_queue *__cfqq;
+       sector_t sector = cfqd->last_position;
+
+       if (RB_EMPTY_ROOT(root))
+               return NULL;
+
+       /*
+        * First, if we find a request starting at the end of the last
+        * request, choose it.
+        */
+       __cfqq = cfq_prio_tree_lookup(cfqd, cur_cfqq->ioprio,
+                                     sector, &parent, NULL);
+       if (__cfqq)
+               return __cfqq;
+
+       /*
+        * If the exact sector wasn't found, the parent of the NULL leaf
+        * will contain the closest sector.
+        */
+       __cfqq = rb_entry(parent, struct cfq_queue, p_node);
+       if (cfq_rq_close(cfqd, __cfqq->next_rq))
+               return __cfqq;
+
+       if (__cfqq->next_rq->sector < sector)
+               node = rb_next(&__cfqq->p_node);
+       else
+               node = rb_prev(&__cfqq->p_node);
+       if (!node)
+               return NULL;
+
+       __cfqq = rb_entry(node, struct cfq_queue, p_node);
+       if (cfq_rq_close(cfqd, __cfqq->next_rq))
+               return __cfqq;
+
+       return NULL;
+}
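
A worked trace of the neighbour logic, with made-up numbers:

/*
 * Suppose the prio tree holds queues whose next_rq sectors are 100, 200
 * and 300 (200 ends up as the root), cfqd->last_position is 210, and the
 * active cic has a valid seek_mean of 64.
 *
 *   lookup(210): 210 > 200 -> right, 210 < 300 -> left, NULL leaf;
 *                a miss, with parent = queue@300
 *   queue@300:   distance 90 > 64, not close; 300 > 210, so step to
 *                rb_prev(), which is queue@200
 *   queue@200:   distance 10 <= 64 -> returned as the close queue
 *
 * Note that the parent after a miss is the last node visited, not
 * necessarily the nearest by distance, hence the extra one-step
 * neighbour probe.
 */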
+
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ *           closely cooperating with itself.
+ *
+ * So, basically we're assuming that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+                                             struct cfq_queue *cur_cfqq,
+                                             int probe)
+{
+       struct cfq_queue *cfqq;
+
+       /*
+        * A valid cfq_io_context is necessary to compare requests against
+        * the seek_mean of the current cfqq.
+        */
+       if (!cfqd->active_cic)
+               return NULL;
+
        /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
         */
-       return 0;
+       cfqq = cfqq_close(cfqd, cur_cfqq);
+       if (!cfqq)
+               return NULL;
+
+       if (cfq_cfqq_coop(cfqq))
+               return NULL;
+
+       if (!probe)
+               cfq_mark_cfqq_coop(cfqq);
+       return cfqq;
 }
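
The probe flag separates the two callers added further down: cfq_select_queue()
passes 0 because it will actually switch to the returned queue, so the queue is
marked coop to keep it from jumping the line twice in a row; the completion
path passes 1 because it only wants a yes/no answer when deciding whether
idling is worthwhile, and must not leave the mark behind.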
 
 #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -879,9 +1049,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        unsigned long sl;
 
        /*
-        * SSD device without seek penalty, disable idling
+        * SSD device without seek penalty, disable idling. But only do so
+        * for devices that support queuing, otherwise we still have a problem
+        * with sync vs async workloads.
         */
-       if (blk_queue_nonrot(cfqd->queue))
+       if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
                return;
 
        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -906,14 +1078,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                return;
 
-       /*
-        * See if this prio level has a good candidate
-        */
-       if (cfq_close_cooperator(cfqd, cfqq) &&
-           (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
-               return;
-
-       cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
@@ -926,7 +1090,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-       cfq_log(cfqd, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
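
The hw_tag test narrows the SSD special case; a hypothetical predicate
capturing the new rule:

static int cfq_seek_idling_pointless(struct cfq_data *cfqd)
{
	/*
	 * Only a non-rotational device that also does command queuing
	 * makes idling pointless; a non-queuing SSD still needs a short
	 * idle window so sync IO is not drowned out by async.
	 */
	return blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag;
}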
 
 /*
@@ -990,7 +1154,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
-       struct cfq_queue *cfqq;
+       struct cfq_queue *cfqq, *new_cfqq = NULL;
 
        cfqq = cfqd->active_queue;
        if (!cfqq)
@@ -999,10 +1163,24 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
        /*
         * The active queue has run out of time, expire it and select new.
         */
-       if (cfq_slice_used(cfqq))
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
                goto expire;
 
        /*
+        * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+        * cfqq.
+        */
+       if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+               /*
+                * We simulate this as a timeout of cfqq, so that it
+                * gets to bank the remainder of its time slice.
+                */
+               cfq_log_cfqq(cfqd, cfqq, "preempt");
+               cfq_slice_expired(cfqd, 1);
+               goto new_queue;
+       }
+
+       /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
@@ -1010,6 +1188,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                goto keep_queue;
 
        /*
+        * If another queue has a request waiting within our mean seek
+        * distance, let it run.  The expire code will check for close
+        * cooperators and put the close queue at the front of the service
+        * tree.
+        */
+       new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
+       if (new_cfqq)
+               goto expire;
+
+       /*
         * No requests pending. If the active queue still has requests in
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
@@ -1023,64 +1211,11 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 expire:
        cfq_slice_expired(cfqd, 0);
 new_queue:
-       cfqq = cfq_set_active_queue(cfqd);
+       cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 keep_queue:
        return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                       int max_dispatch)
-{
-       int dispatched = 0;
-
-       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-       do {
-               struct request *rq;
-
-               /*
-                * follow expired path, else get first next available
-                */
-               rq = cfq_check_fifo(cfqq);
-               if (rq == NULL)
-                       rq = cfqq->next_rq;
-
-               /*
-                * finally, insert request into driver dispatch list
-                */
-               cfq_dispatch_insert(cfqd->queue, rq);
-
-               dispatched++;
-
-               if (!cfqd->active_cic) {
-                       atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-                       cfqd->active_cic = RQ_CIC(rq);
-               }
-
-               if (RB_EMPTY_ROOT(&cfqq->sort_list))
-                       break;
-
-       } while (dispatched < max_dispatch);
-
-       /*
-        * expire an async queue immediately if it has used up its slice. idle
-        * queue always expire after 1 dispatch round.
-        */
-       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-           dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))) {
-               cfqq->slice_end = jiffies + 1;
-               cfq_slice_expired(cfqd, 0);
-       }
-
-       return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
        int dispatched = 0;
@@ -1114,11 +1249,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving it to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct request *rq;
+
+       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+       /*
+        * follow expired path, else get first next available
+        */
+       rq = cfq_check_fifo(cfqq);
+       if (!rq)
+               rq = cfqq->next_rq;
+
+       /*
+        * insert request into driver dispatch list
+        */
+       cfq_dispatch_insert(cfqd->queue, rq);
+
+       if (!cfqd->active_cic) {
+               struct cfq_io_context *cic = RQ_CIC(rq);
+
+               atomic_inc(&cic->ioc->refcount);
+               cfqd->active_cic = cic;
+       }
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from it to the
+ * dispatch list.
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       int dispatched;
+       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1126,33 +1295,63 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);
 
-       dispatched = 0;
-       while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-               int max_dispatch;
+       cfqq = cfq_select_queue(cfqd);
+       if (!cfqq)
+               return 0;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return 0;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
 
-               max_dispatch = cfqd->cfq_quantum;
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * an idle queue must never have more than one IO in flight
+                */
                if (cfq_class_idle(cfqq))
-                       max_dispatch = 1;
+                       return 0;
 
-               if (cfqq->dispatched >= max_dispatch) {
-                       if (cfqd->busy_queues > 1)
-                               break;
-                       if (cfqq->dispatched >= 4 * max_dispatch)
-                               break;
-               }
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return 0;
 
-               if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-                       break;
+               /*
+                * we are the only queue, allow up to 4 times the 'quantum'
+                */
+               if (cfqq->dispatched >= 4 * max_dispatch)
+                       return 0;
+       }
 
-               cfq_clear_cfqq_must_dispatch(cfqq);
-               cfq_clear_cfqq_wait_request(cfqq);
-               del_timer(&cfqd->idle_slice_timer);
+       /*
+        * Dispatch a request from this cfqq
+        */
+       cfq_dispatch_request(cfqd, cfqq);
+       cfqq->slice_dispatch++;
+       cfq_clear_cfqq_must_dispatch(cfqq);
 
-               dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       /*
+        * expire an async queue immediately if it has used up its slice. idle
+        * queue always expires after 1 dispatch round.
+        */
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+           cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))) {
+               cfqq->slice_end = jiffies + 1;
+               cfq_slice_expired(cfqd, 0);
        }
 
-       cfq_log(cfqd, "dispatched=%d", dispatched);
-       return dispatched;
+       cfq_log(cfqd, "dispatched a request");
+       return 1;
 }
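
The behavioural shift in this rework: the old code drained a batch of up to
'quantum' requests per elevator call, while the new code moves one request per
call, so expiry and preemption checks get to run after every dispatch. A toy
userspace model (all numbers invented):

#include <stdio.h>

int main(void)
{
	int be_pending = 4, quantum = 4, i;
	int rt_arrives_at = 2;	/* an RT request shows up mid-batch */

	/* old __cfq_dispatch_requests(): one call drained a batch */
	for (i = 0; i < quantum && be_pending; i++, be_pending--)
		printf("old: BE request %d dispatched (RT waits)\n", i);
	printf("old: RT runs only after the whole batch\n");

	/* new: one dispatch per call, cfq_select_queue() reruns between */
	be_pending = 4;
	for (i = 0; be_pending; i++) {
		if (i == rt_arrives_at) {
			printf("new: RT preempts after %d requests\n", i);
			break;
		}
		printf("new: BE request %d dispatched\n", i);
		be_pending--;
	}
	return 0;
}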
 
 /*
@@ -1295,14 +1494,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
        if (ioc->ioc_data == cic)
                rcu_assign_pointer(ioc->ioc_data, NULL);
 
-       if (cic->cfqq[ASYNC]) {
-               cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
-               cic->cfqq[ASYNC] = NULL;
+       if (cic->cfqq[BLK_RW_ASYNC]) {
+               cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+               cic->cfqq[BLK_RW_ASYNC] = NULL;
        }
 
-       if (cic->cfqq[SYNC]) {
-               cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
-               cic->cfqq[SYNC] = NULL;
+       if (cic->cfqq[BLK_RW_SYNC]) {
+               cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
+               cic->cfqq[BLK_RW_SYNC] = NULL;
        }
 }
 
@@ -1316,7 +1515,15 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
                unsigned long flags;
 
                spin_lock_irqsave(q->queue_lock, flags);
-               __cfq_exit_single_io_context(cfqd, cic);
+
+               /*
+                * Ensure we get a fresh copy of the ->key to prevent
+                * race between exiting task and queue
+                */
+               smp_read_barrier_depends();
+               if (cic->key)
+                       __cfq_exit_single_io_context(cfqd, cic);
+
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
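
The barrier pairs with the smp_wmb() before cic->key is cleared in
__cfq_exit_single_io_context(); smp_read_barrier_depends() is a no-op on
everything except Alpha, where a dependent load could otherwise still see a
stale ->key after the queue has been torn down.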
@@ -1403,17 +1610,18 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 
        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-       cfqq = cic->cfqq[ASYNC];
+       cfqq = cic->cfqq[BLK_RW_ASYNC];
        if (cfqq) {
                struct cfq_queue *new_cfqq;
-               new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
+               new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
+                                               GFP_ATOMIC);
                if (new_cfqq) {
-                       cic->cfqq[ASYNC] = new_cfqq;
+                       cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
                        cfq_put_queue(cfqq);
                }
        }
 
-       cfqq = cic->cfqq[SYNC];
+       cfqq = cic->cfqq[BLK_RW_SYNC];
        if (cfqq)
                cfq_mark_cfqq_prio_changed(cfqq);
 
@@ -1464,13 +1672,13 @@ retry:
                }
 
                RB_CLEAR_NODE(&cfqq->rb_node);
+               RB_CLEAR_NODE(&cfqq->p_node);
                INIT_LIST_HEAD(&cfqq->fifo);
 
                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;
 
                cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_mark_cfqq_queue_new(cfqq);
 
                cfq_init_prio_data(cfqq, ioc);
 
@@ -1795,6 +2003,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;
 
+       /*
+        * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+        */
+       if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+               return 1;
+
        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                return 0;
 
@@ -1851,23 +2065,31 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        if (cfqq == cfqd->active_queue) {
                /*
-                * if we are waiting for a request for this queue, let it rip
-                * immediately and flag that we must not expire this queue
-                * just now
+                * Remember that we saw a request from this process, but
+                * don't start queuing just yet. Otherwise we risk seeing lots
+                * of tiny requests, because we disrupt the normal plugging
+                * and merging. If the request is already larger than a single
+                * page, let it rip immediately. For that case we assume that
+                * merging is already done. Ditto for a busy system with
+                * other work pending: don't risk waiting for the idle
+                * timer unplug before getting on with that work.
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
+                       if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+                           cfqd->busy_queues > 1) {
+                               del_timer(&cfqd->idle_slice_timer);
+                               blk_start_queueing(cfqd->queue);
+                       }
                        cfq_mark_cfqq_must_dispatch(cfqq);
-                       del_timer(&cfqd->idle_slice_timer);
-                       blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired its mean thinktime or this new queue
-                * has some old slice time left and is of higher priority
+                * has some old slice time left and is of higher priority or
+                * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
 }
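
The kick-now condition above, pulled out as a hypothetical predicate for
readability:

static int cfq_kick_now(struct cfq_data *cfqd, struct request *rq)
{
	/*
	 * Waiting can only buy us a merge while the request is still
	 * small; and with other queues busy, sitting on the request
	 * until the idle timer unplug would stall that other work.
	 */
	return blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1;
}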
@@ -1943,13 +2165,23 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
+               const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
+               /*
+                * If there are no requests waiting in this queue, and
+                * there are other queues ready to issue requests, AND
+                * those other queues are issuing requests within our
+                * mean seek distance, give them a chance to run instead
+                * of idling.
+                */
                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
                        cfq_slice_expired(cfqd, 1);
-               else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+               else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
+                        sync && !rq_noidle(rq))
                        cfq_arm_slice_timer(cfqd);
        }
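
The completion-path policy after this hunk, summarized:

/*
 *   slice used up, or idle class            -> expire the slice now
 *   queue drained + sync + !rq_noidle, and
 *   no close cooperator ready to go         -> arm the idle timer
 *   anything else                           -> leave the queue active
 */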
 
@@ -2011,7 +2243,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
        if (!cic)
                return ELV_MQUEUE_MAY;
 
-       cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
+       cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
        if (cfqq) {
                cfq_init_prio_data(cfqq, cic->ioc);
                cfq_prio_boost(cfqq);
@@ -2101,11 +2333,10 @@ static void cfq_kick_queue(struct work_struct *work)
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        struct request_queue *q = cfqd->queue;
-       unsigned long flags;
 
-       spin_lock_irqsave(q->queue_lock, flags);
+       spin_lock_irq(q->queue_lock);
        blk_start_queueing(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       spin_unlock_irq(q->queue_lock);
 }
 
 /*
@@ -2127,6 +2358,12 @@ static void cfq_idle_slice_timer(unsigned long data)
                timed_out = 0;
 
                /*
+                * We saw a request before the queue expired, let it through
+                */
+               if (cfq_cfqq_must_dispatch(cfqq))
+                       goto out_kick;
+
+               /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
@@ -2142,10 +2379,8 @@ static void cfq_idle_slice_timer(unsigned long data)
                /*
                 * not expired and it has a request pending, let it dispatch
                 */
-               if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
+               if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                        goto out_kick;
-               }
        }
 expire:
        cfq_slice_expired(cfqd, timed_out);
@@ -2158,7 +2393,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
        del_timer_sync(&cfqd->idle_slice_timer);
-       kblockd_flush_work(&cfqd->unplug_work);
+       cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2176,7 +2411,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
                cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
-static void cfq_exit_queue(elevator_t *e)
+static void cfq_exit_queue(struct elevator_queue *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        struct request_queue *q = cfqd->queue;
@@ -2286,7 +2521,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
-static ssize_t __FUNC(elevator_t *e, char *page)                       \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
 {                                                                      \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data = __VAR;                                    \
@@ -2306,7 +2541,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
 {                                                                      \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data;                                            \