diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cba8a5d..fd7080e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,6 +71,51 @@ struct cfq_rb_root {
 #define CFQ_RB_ROOT    (struct cfq_rb_root) { RB_ROOT, NULL, }
 
 /*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+       /* reference count */
+       atomic_t ref;
+       /* various state flags, see below */
+       unsigned int flags;
+       /* parent cfq_data */
+       struct cfq_data *cfqd;
+       /* service_tree member */
+       struct rb_node rb_node;
+       /* service_tree key */
+       unsigned long rb_key;
+       /* prio tree member */
+       struct rb_node p_node;
+       /* prio tree root we belong to, if any */
+       struct rb_root *p_root;
+       /* sorted list of pending requests */
+       struct rb_root sort_list;
+       /* if fifo isn't expired, next request to serve */
+       struct request *next_rq;
+       /* requests queued in sort_list */
+       int queued[2];
+       /* currently allocated requests */
+       int allocated[2];
+       /* fifo list of requests in sort_list */
+       struct list_head fifo;
+
+       unsigned long slice_end;
+       long slice_resid;
+       unsigned int slice_dispatch;
+
+       /* pending metadata requests */
+       int meta_pending;
+       /* number of requests that are on the dispatch list or inside driver */
+       int dispatched;
+
+       /* io prio of this group */
+       unsigned short ioprio, org_ioprio;
+       unsigned short ioprio_class, org_ioprio_class;
+
+       pid_t pid;
+};
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
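
Note on the move itself: relocating struct cfq_queue above struct cfq_data is not cosmetic. Later in this patch cfq_data grows an embedded struct cfq_queue oom_cfqq member, and embedding a struct by value requires its complete definition to be in scope, whereas a pointer member only needs a forward declaration. A minimal sketch of the constraint (illustration only, not from the patch):

    struct b;                           /* forward declaration */
    struct by_ptr { struct b *p; };     /* fine: pointer size is known */
    struct b { int x; };
    struct by_val { struct b v; };      /* needs the full definition above */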
@@ -80,6 +125,14 @@ struct cfq_data {
         * rr list of queues with requests and the count of them
         */
        struct cfq_rb_root service_tree;
+
+       /*
+        * Each priority tree is sorted by next_request position.  These
+        * trees are used when determining if two or more queues are
+        * interleaving requests (see cfq_close_cooperator).
+        */
+       struct rb_root prio_trees[CFQ_PRIO_LISTS];
+
        unsigned int busy_queues;
        /*
         * Used to track any pending rt requests so we can pre-empt current
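
The new prio_trees[] array gives each best-effort priority level its own rbtree (CFQ_PRIO_LISTS is IOPRIO_BE_NR, i.e. 8, in this version of the code), with queues keyed by the start sector of their next pending request. Tree selection and sort key, as cfq_prio_tree_add() below uses them:

    struct rb_root *root = &cfqd->prio_trees[cfqq->org_ioprio];
    sector_t key = blk_rq_pos(cfqq->next_rq);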
@@ -114,7 +167,6 @@ struct cfq_data {
        struct cfq_queue *async_idle_cfqq;
 
        sector_t last_position;
-       unsigned long last_end_request;
 
        /*
         * tunables, see top of file
@@ -128,47 +180,11 @@ struct cfq_data {
        unsigned int cfq_slice_idle;
 
        struct list_head cic_list;
-};
 
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
-       /* reference count */
-       atomic_t ref;
-       /* various state flags, see below */
-       unsigned int flags;
-       /* parent cfq_data */
-       struct cfq_data *cfqd;
-       /* service_tree member */
-       struct rb_node rb_node;
-       /* service_tree key */
-       unsigned long rb_key;
-       /* sorted list of pending requests */
-       struct rb_root sort_list;
-       /* if fifo isn't expired, next request to serve */
-       struct request *next_rq;
-       /* requests queued in sort_list */
-       int queued[2];
-       /* currently allocated requests */
-       int allocated[2];
-       /* fifo list of requests in sort_list */
-       struct list_head fifo;
-
-       unsigned long slice_end;
-       long slice_resid;
-       unsigned int slice_dispatch;
-
-       /* pending metadata requests */
-       int meta_pending;
-       /* number of requests that are on the dispatch list or inside driver */
-       int dispatched;
-
-       /* io prio of this group */
-       unsigned short ioprio, org_ioprio;
-       unsigned short ioprio_class, org_ioprio_class;
-
-       pid_t pid;
+       /*
+        * Fallback dummy cfqq for extreme OOM conditions
+        */
+       struct cfq_queue oom_cfqq;
 };
 
 enum cfqq_state_flags {
@@ -182,6 +198,7 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
+       CFQ_CFQQ_FLAG_coop,             /* has done a coop jump of the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -208,6 +225,7 @@ CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
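
For reference, the CFQ_CFQQ_FNS(coop) instantiation added above generates the mark/clear/test helpers the cooperator code calls; the expansion follows the existing macro, roughly:

    static inline void cfq_mark_cfqq_coop(struct cfq_queue *cfqq)
    {
            cfqq->flags |= (1 << CFQ_CFQQ_FLAG_coop);
    }
    static inline void cfq_clear_cfqq_coop(struct cfq_queue *cfqq)
    {
            cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_coop);
    }
    static inline int cfq_cfqq_coop(const struct cfq_queue *cfqq)
    {
            return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_coop)) != 0;
    }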
@@ -335,8 +353,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                return rq2;
 
-       s1 = rq1->sector;
-       s2 = rq2->sector;
+       s1 = blk_rq_pos(rq1);
+       s2 = blk_rq_pos(rq2);
 
        last = cfqd->last_position;
 
@@ -416,13 +434,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
        return NULL;
 }
 
+static void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+
 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 {
        if (root->left == n)
                root->left = NULL;
-
-       rb_erase(n, &root->rb);
-       RB_CLEAR_NODE(n);
+       rb_erase_init(n, &root->rb);
 }
 
 /*
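
rb_erase_init() exists so that every erase leaves the node in the state RB_CLEAR_NODE() establishes, which makes RB_EMPTY_NODE() a reliable "is this queue still on a tree?" test. That is exactly the guard cfq_del_cfqq_rr() applies before erasing, a pattern mirrored further down in this patch:

    if (!RB_EMPTY_NODE(&cfqq->rb_node))     /* still linked into the tree? */
            cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);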
@@ -467,8 +489,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
-static void cfq_service_tree_add(struct cfq_data *cfqd,
-                                   struct cfq_queue *cfqq, int add_front)
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                                int add_front)
 {
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
@@ -541,6 +563,67 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
+static struct cfq_queue *
+cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
+                    sector_t sector, struct rb_node **ret_parent,
+                    struct rb_node ***rb_link)
+{
+       struct rb_node **p, *parent;
+       struct cfq_queue *cfqq = NULL;
+
+       parent = NULL;
+       p = &root->rb_node;
+       while (*p) {
+               struct rb_node **n;
+
+               parent = *p;
+               cfqq = rb_entry(parent, struct cfq_queue, p_node);
+
+               /*
+                * Sort strictly based on sector.  Smallest to the left,
+                * largest to the right.
+                */
+               if (sector > blk_rq_pos(cfqq->next_rq))
+                       n = &(*p)->rb_right;
+               else if (sector < blk_rq_pos(cfqq->next_rq))
+                       n = &(*p)->rb_left;
+               else
+                       break;
+               p = n;
+               cfqq = NULL;
+       }
+
+       *ret_parent = parent;
+       if (rb_link)
+               *rb_link = p;
+       return cfqq;
+}
+
+static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct rb_node **p, *parent;
+       struct cfq_queue *__cfqq;
+
+       if (cfqq->p_root) {
+               rb_erase(&cfqq->p_node, cfqq->p_root);
+               cfqq->p_root = NULL;
+       }
+
+       if (cfq_class_idle(cfqq))
+               return;
+       if (!cfqq->next_rq)
+               return;
+
+       cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
+       __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+                                     blk_rq_pos(cfqq->next_rq), &parent, &p);
+       if (!__cfqq) {
+               rb_link_node(&cfqq->p_node, parent, p);
+               rb_insert_color(&cfqq->p_node, cfqq->p_root);
+       } else
+               cfqq->p_root = NULL;
+}
+
 /*
  * Update cfqq's position in the service tree.
  */
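
Worth noting about the pair above: cfq_prio_tree_lookup() returns a queue only on an exact sector match, and cfq_prio_tree_add() treats that as a collision, leaving the new queue off the tree entirely (p_root reset to NULL) rather than inserting a duplicate key; the queue gets another chance whenever its next_rq changes. The insert path, condensed with that in mind:

    __cfqq = cfq_prio_tree_lookup(cfqd, root, blk_rq_pos(cfqq->next_rq),
                                  &parent, &p);
    if (!__cfqq) {                          /* no queue at this exact sector */
            rb_link_node(&cfqq->p_node, parent, p);
            rb_insert_color(&cfqq->p_node, cfqq->p_root);
    } else
            cfqq->p_root = NULL;            /* collision: stay off the tree */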
@@ -549,8 +632,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        /*
         * Resorting requires the cfqq to be on the RR list already.
         */
-       if (cfq_cfqq_on_rr(cfqq))
+       if (cfq_cfqq_on_rr(cfqq)) {
                cfq_service_tree_add(cfqd, cfqq, 0);
+               cfq_prio_tree_add(cfqd, cfqq);
+       }
 }
 
 /*
@@ -581,6 +666,10 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node))
                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+       if (cfqq->p_root) {
+               rb_erase(&cfqq->p_node, cfqq->p_root);
+               cfqq->p_root = NULL;
+       }
 
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
@@ -610,7 +699,7 @@ static void cfq_add_rq_rb(struct request *rq)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *__alias;
+       struct request *__alias, *prev;
 
        cfqq->queued[rq_is_sync(rq)]++;
 
@@ -627,7 +716,15 @@ static void cfq_add_rq_rb(struct request *rq)
        /*
         * check if this request is a better next-serve candidate
         */
+       prev = cfqq->next_rq;
        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+
+       /*
+        * adjust priority tree position, if ->next_rq changes
+        */
+       if (prev != cfqq->next_rq)
+               cfq_prio_tree_add(cfqd, cfqq);
+
        BUG_ON(!cfqq->next_rq);
 }
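
The prev/next_rq comparison above matters because the prio tree key is blk_rq_pos(cfqq->next_rq): once the best next-serve candidate changes, the queue's tree position is stale, and cfq_prio_tree_add() (which erases before re-inserting) puts it back at the right key:

    if (prev != cfqq->next_rq)              /* sort key changed */
            cfq_prio_tree_add(cfqd, cfqq);  /* erase + re-insert at new key */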
 
@@ -667,7 +764,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                                                cfqd->rq_in_driver);
 
-       cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+       cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -840,11 +937,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 /*
  * Get and set a new active queue for service.
  */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
+                                             struct cfq_queue *cfqq)
 {
-       struct cfq_queue *cfqq;
+       if (!cfqq) {
+               cfqq = cfq_get_next_queue(cfqd);
+               if (cfqq)
+                       cfq_clear_cfqq_coop(cfqq);
+       }
 
-       cfqq = cfq_get_next_queue(cfqd);
        __cfq_set_active_queue(cfqd, cfqq);
        return cfqq;
 }
@@ -852,34 +953,106 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                          struct request *rq)
 {
-       if (rq->sector >= cfqd->last_position)
-               return rq->sector - cfqd->last_position;
+       if (blk_rq_pos(rq) >= cfqd->last_position)
+               return blk_rq_pos(rq) - cfqd->last_position;
        else
-               return cfqd->last_position - rq->sector;
+               return cfqd->last_position - blk_rq_pos(rq);
 }
 
+#define CIC_SEEK_THR   (8 * 1024)
+#define CIC_SEEKY(cic) ((cic)->seek_mean > CIC_SEEK_THR)
+
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 {
        struct cfq_io_context *cic = cfqd->active_cic;
+       sector_t sdist = cic->seek_mean;
 
        if (!sample_valid(cic->seek_samples))
-               return 0;
+               sdist = CIC_SEEK_THR;
+
+       return cfq_dist_from_last(cfqd, rq) <= sdist;
+}
+
+static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
+                                   struct cfq_queue *cur_cfqq)
+{
+       struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
+       struct rb_node *parent, *node;
+       struct cfq_queue *__cfqq;
+       sector_t sector = cfqd->last_position;
 
-       return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
+       if (RB_EMPTY_ROOT(root))
+               return NULL;
+
+       /*
+        * First, if we find a request starting at the end of the last
+        * request, choose it.
+        */
+       __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
+       if (__cfqq)
+               return __cfqq;
+
+       /*
+        * If the exact sector wasn't found, the parent of the NULL leaf
+        * will contain the closest sector.
+        */
+       __cfqq = rb_entry(parent, struct cfq_queue, p_node);
+       if (cfq_rq_close(cfqd, __cfqq->next_rq))
+               return __cfqq;
+
+       if (blk_rq_pos(__cfqq->next_rq) < sector)
+               node = rb_next(&__cfqq->p_node);
+       else
+               node = rb_prev(&__cfqq->p_node);
+       if (!node)
+               return NULL;
+
+       __cfqq = rb_entry(node, struct cfq_queue, p_node);
+       if (cfq_rq_close(cfqd, __cfqq->next_rq))
+               return __cfqq;
+
+       return NULL;
 }
 
-static int cfq_close_cooperator(struct cfq_data *cfq_data,
-                               struct cfq_queue *cfqq)
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ *           closely cooperating with itself.
+ *
+ * So, basically we're assuming that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+                                             struct cfq_queue *cur_cfqq,
+                                             int probe)
 {
+       struct cfq_queue *cfqq;
+
+       /*
+        * A valid cfq_io_context is necessary to compare requests against
+        * the seek_mean of the current cfqq.
+        */
+       if (!cfqd->active_cic)
+               return NULL;
+
        /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
         */
-       return 0;
-}
+       cfqq = cfqq_close(cfqd, cur_cfqq);
+       if (!cfqq)
+               return NULL;
 
-#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
+       if (cfq_cfqq_coop(cfqq))
+               return NULL;
+
+       if (!probe)
+               cfq_mark_cfqq_coop(cfqq);
+       return cfqq;
+}
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
@@ -917,13 +1090,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                return;
 
-       /*
-        * See if this prio level has a good candidate
-        */
-       if (cfq_close_cooperator(cfqd, cfqq) &&
-           (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
-               return;
-
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
@@ -936,7 +1102,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-       cfq_log(cfqd, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
 /*
@@ -1000,7 +1166,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
-       struct cfq_queue *cfqq;
+       struct cfq_queue *cfqq, *new_cfqq = NULL;
 
        cfqq = cfqd->active_queue;
        if (!cfqq)
@@ -1034,6 +1200,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                goto keep_queue;
 
        /*
+        * If another queue has a request waiting within our mean seek
+        * distance, let it run.  The expire code will check for close
+        * cooperators and put the close queue at the front of the service
+        * tree.
+        */
+       new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
+       if (new_cfqq)
+               goto expire;
+
+       /*
         * No requests pending. If the active queue still has requests in
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
@@ -1047,7 +1223,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 expire:
        cfq_slice_expired(cfqd, 0);
 new_queue:
-       cfqq = cfq_set_active_queue(cfqd);
+       cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 keep_queue:
        return cfqq;
 }
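
Two details of the hand-off above: cfq_close_cooperator()'s probe flag decides whether finding a cooperator has side effects, and cfq_set_active_queue() clears the coop flag whenever it picks a fresh queue from the service tree, so a queue that jumped the line once becomes eligible again later. The two call patterns this patch introduces, as a sketch:

    /* cfq_select_queue(): commit to the hand-off; the cooperator is
     * marked coop so it cannot jump the queue twice in a row */
    new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);

    /* cfq_completed_request(): probe only, no marking, just to decide
     * whether idling for the current queue is worthwhile */
    if (!cfq_close_cooperator(cfqd, cfqq, 1))
            cfq_arm_slice_timer(cfqd);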
@@ -1081,7 +1257,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 
        BUG_ON(cfqd->busy_queues);
 
-       cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
+       cfq_log(cfqd, "forced_dispatch=%d", dispatched);
        return dispatched;
 }
 
@@ -1110,7 +1286,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        if (!cfqd->active_cic) {
                struct cfq_io_context *cic = RQ_CIC(rq);
 
-               atomic_inc(&cic->ioc->refcount);
+               atomic_long_inc(&cic->ioc->refcount);
                cfqd->active_cic = cic;
        }
 }
@@ -1470,6 +1646,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
        ioc->ioprio_changed = 0;
 }
 
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                         pid_t pid, int is_sync)
+{
+       RB_CLEAR_NODE(&cfqq->rb_node);
+       RB_CLEAR_NODE(&cfqq->p_node);
+       INIT_LIST_HEAD(&cfqq->fifo);
+
+       atomic_set(&cfqq->ref, 0);
+       cfqq->cfqd = cfqd;
+
+       cfq_mark_cfqq_prio_changed(cfqq);
+
+       if (is_sync) {
+               if (!cfq_class_idle(cfqq))
+                       cfq_mark_cfqq_idle_window(cfqq);
+               cfq_mark_cfqq_sync(cfqq);
+       }
+       cfqq->pid = pid;
+}
+
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
                     struct io_context *ioc, gfp_t gfp_mask)
@@ -1482,55 +1678,40 @@ retry:
        /* cic always exists here */
        cfqq = cic_to_cfqq(cic, is_sync);
 
-       if (!cfqq) {
+       /*
+        * Always try a new alloc if we fell back to the OOM cfqq
+        * originally, since it should just be a temporary situation.
+        */
+       if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+               cfqq = NULL;
                if (new_cfqq) {
                        cfqq = new_cfqq;
                        new_cfqq = NULL;
                } else if (gfp_mask & __GFP_WAIT) {
-                       /*
-                        * Inform the allocator of the fact that we will
-                        * just repeat this allocation if it fails, to allow
-                        * the allocator to do whatever it needs to attempt to
-                        * free memory.
-                        */
                        spin_unlock_irq(cfqd->queue->queue_lock);
                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
-                                       gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+                                       gfp_mask | __GFP_ZERO,
                                        cfqd->queue->node);
                        spin_lock_irq(cfqd->queue->queue_lock);
-                       goto retry;
+                       if (new_cfqq)
+                               goto retry;
                } else {
                        cfqq = kmem_cache_alloc_node(cfq_pool,
                                        gfp_mask | __GFP_ZERO,
                                        cfqd->queue->node);
-                       if (!cfqq)
-                               goto out;
                }
 
-               RB_CLEAR_NODE(&cfqq->rb_node);
-               INIT_LIST_HEAD(&cfqq->fifo);
-
-               atomic_set(&cfqq->ref, 0);
-               cfqq->cfqd = cfqd;
-
-               cfq_mark_cfqq_prio_changed(cfqq);
-
-               cfq_init_prio_data(cfqq, ioc);
-
-               if (is_sync) {
-                       if (!cfq_class_idle(cfqq))
-                               cfq_mark_cfqq_idle_window(cfqq);
-                       cfq_mark_cfqq_sync(cfqq);
-               }
-               cfqq->pid = current->pid;
-               cfq_log_cfqq(cfqd, cfqq, "alloced");
+               if (cfqq) {
+                       cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+                       cfq_init_prio_data(cfqq, ioc);
+                       cfq_log_cfqq(cfqd, cfqq, "alloced");
+               } else
+                       cfqq = &cfqd->oom_cfqq;
        }
 
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
-out:
-       WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
 }
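
The net effect of this hunk: __GFP_NOFAIL (and the comment justifying it) is gone, allocation is allowed to fail, and failure is absorbed rather than propagated, so cfq_find_alloc_queue() can no longer return NULL. That is what lets the later hunks delete the error paths in cfq_get_queue() and cfq_set_request(). The fallback ordering, condensed:

    /* 1. reuse new_cfqq saved from a previous retry pass, else
     * 2. allocate, dropping the queue lock when __GFP_WAIT permits, else
     * 3. fall back to the embedded, never-freed oom queue */
    if (!cfqq)
            cfqq = &cfqd->oom_cfqq;         /* callers never see NULL */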
 
@@ -1563,11 +1744,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
                cfqq = *async_cfqq;
        }
 
-       if (!cfqq) {
+       if (!cfqq)
                cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
-               if (!cfqq)
-                       return NULL;
-       }
 
        /*
         * pin the queue now that it's allocated, scheduler exit will prune it
@@ -1743,10 +1921,12 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
        sector_t sdist;
        u64 total;
 
-       if (cic->last_request_pos < rq->sector)
-               sdist = rq->sector - cic->last_request_pos;
+       if (!cic->last_request_pos)
+               sdist = 0;
+       else if (cic->last_request_pos < blk_rq_pos(rq))
+               sdist = blk_rq_pos(rq) - cic->last_request_pos;
        else
-               sdist = cic->last_request_pos - rq->sector;
+               sdist = cic->last_request_pos - blk_rq_pos(rq);
 
        /*
         * Don't allow the seek distance to get too large from the
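
The new !cic->last_request_pos case guards the very first sample from a context: last_request_pos starts at zero, so previously a first I/O landing at, say, sector 1048576 recorded a "seek" of 1048576 sectors and skewed seek_mean. With those made-up numbers:

    /* first request from this context: last_request_pos == 0         */
    /* old: sdist = blk_rq_pos(rq) - 0 == 1048576, a bogus huge seek  */
    /* new: sdist = 0, the first sample is effectively neutral        */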
@@ -1896,17 +2076,27 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfq_update_io_seektime(cfqd, cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);
 
-       cic->last_request_pos = rq->sector + rq->nr_sectors;
+       cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
        if (cfqq == cfqd->active_queue) {
                /*
                 * Remember that we saw a request from this process, but
                 * don't start queuing just yet. Otherwise we risk seeing lots
                 * of tiny requests, because we disrupt the normal plugging
-                * and merging.
+                * and merging. If the request is already larger than a single
+                * page, let it rip immediately. For that case we assume that
+                * merging is already done. Ditto for a busy system that
+                * has other work pending, don't risk delaying until the
+                * idle timer unplug to continue working.
                 */
-               if (cfq_cfqq_wait_request(cfqq))
+               if (cfq_cfqq_wait_request(cfqq)) {
+                       if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+                           cfqd->busy_queues > 1) {
+                               del_timer(&cfqd->idle_slice_timer);
+                               __blk_run_queue(cfqd->queue);
+                       }
                        cfq_mark_cfqq_must_dispatch(cfqq);
+               }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
@@ -1915,7 +2105,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               blk_start_queueing(cfqd->queue);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -1979,9 +2169,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;
 
-       if (!cfq_class_idle(cfqq))
-               cfqd->last_end_request = now;
-
        if (sync)
                RQ_CIC(rq)->last_end_request = now;
 
@@ -1990,16 +2177,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
+               const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
+               /*
+                * If there are no requests waiting in this queue, and
+                * there are other queues ready to issue requests, AND
+                * those other queues are issuing requests within our
+                * mean seek distance, give them a chance to run instead
+                * of idling.
+                */
                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
                        cfq_slice_expired(cfqd, 1);
-               else if (sync && !rq_noidle(rq) &&
-                        RB_EMPTY_ROOT(&cfqq->sort_list)) {
+               else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
+                        sync && !rq_noidle(rq))
                        cfq_arm_slice_timer(cfqd);
-               }
        }
 
        if (!cfqd->rq_in_driver)
@@ -2116,12 +2311,8 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
                goto queue_fail;
 
        cfqq = cic_to_cfqq(cic, is_sync);
-       if (!cfqq) {
+       if (!cfqq || cfqq == &cfqd->oom_cfqq) {
                cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
-
-               if (!cfqq)
-                       goto queue_fail;
-
                cic_set_cfqq(cic, cfqq, is_sync);
        }
 
@@ -2150,11 +2341,10 @@ static void cfq_kick_queue(struct work_struct *work)
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        struct request_queue *q = cfqd->queue;
-       unsigned long flags;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_start_queueing(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       spin_lock_irq(q->queue_lock);
+       __blk_run_queue(cfqd->queue);
+       spin_unlock_irq(q->queue_lock);
 }
 
 /*
@@ -2261,12 +2451,30 @@ static void cfq_exit_queue(struct elevator_queue *e)
 static void *cfq_init_queue(struct request_queue *q)
 {
        struct cfq_data *cfqd;
+       int i;
 
        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!cfqd)
                return NULL;
 
        cfqd->service_tree = CFQ_RB_ROOT;
+
+       /*
+        * Not strictly needed (since RB_ROOT just clears the node and we
+        * zeroed cfqd on alloc), but better be safe in case someone decides
+        * to add magic to the rb code
+        */
+       for (i = 0; i < CFQ_PRIO_LISTS; i++)
+               cfqd->prio_trees[i] = RB_ROOT;
+
+       /*
+        * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+        * Grab a permanent reference to it, so that the normal code flow
+        * will not attempt to free it.
+        */
+       cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+       atomic_inc(&cfqd->oom_cfqq.ref);
+
        INIT_LIST_HEAD(&cfqd->cic_list);
 
        cfqd->queue = q;
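
The atomic_inc() above establishes the invariant that keeps the fallback safe: cfq_put_queue() frees a cfqq once its refcount reaches zero, but oom_cfqq is embedded in cfqd rather than allocated from cfq_pool and must never be freed. Holding one permanent reference means the normal get/put traffic on the queue can never take it to zero:

    atomic_inc(&cfqd->oom_cfqq.ref);        /* refcount floor of 1: a
                                             * final put can never free
                                             * the embedded queue */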
@@ -2277,7 +2485,6 @@ static void *cfq_init_queue(struct request_queue *q)
 
        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
-       cfqd->last_end_request = jiffies;
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];