diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8bc31a5..cfb0b2f 100644
@@ -117,6 +117,7 @@ struct cfq_queue {
 
        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
+       unsigned int allocated_slice;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
@@ -143,6 +144,9 @@ struct cfq_queue {
        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
+       struct cfq_group *orig_cfqg;
+       /* Sectors dispatched in current dispatch round */
+       unsigned long nr_sectors;
 };
 
 /*
@@ -270,6 +274,7 @@ struct cfq_data {
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_latency;
+       unsigned int cfq_group_isolation;
 
        struct list_head cic_list;
 
@@ -282,6 +287,7 @@ struct cfq_data {
 
        /* List of cfq groups being managed on this device*/
        struct hlist_head cfqg_list;
+       struct rcu_head rcu;
 };
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -312,6 +318,8 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
+       CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
+       CFQ_CFQQ_FLAG_wait_busy_done,   /* Got new request. Expire the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -339,10 +347,25 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
+CFQ_CFQQ_FNS(wait_busy);
+CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
+#ifdef CONFIG_DEBUG_CFQ_IOSCHED
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+       blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
+                       cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+                       blkg_path(&(cfqq)->cfqg->blkg), ##args);
+
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                         \
+       blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
+                               blkg_path(&(cfqg)->blkg), ##args);      \
+
+#else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0);
+#endif
 #define cfq_log(cfqd, fmt, args...)    \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -388,6 +411,13 @@ static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
                + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
 }
 
+static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
+                                       struct cfq_group *cfqg)
+{
+       return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
+               + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+}
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                                       struct io_context *, gfp_t);
@@ -563,6 +593,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        }
        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
+       cfqq->allocated_slice = slice;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
@@ -832,17 +863,19 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
        if (cfqg->nr_cfqq)
                return;
 
+       cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfqg->on_st = false;
        cfqd->nr_groups--;
        st->total_weight -= cfqg->weight;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
        cfqg->saved_workload_slice = 0;
+       blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 {
-       unsigned int slice_used, allocated_slice;
+       unsigned int slice_used;
 
        /*
         * Queue got expired before even a single request completed or
@@ -859,12 +892,12 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                                        1);
        } else {
                slice_used = jiffies - cfqq->slice_start;
-               allocated_slice = cfqq->slice_end - cfqq->slice_start;
-               if (slice_used > allocated_slice)
-                       slice_used = allocated_slice;
+               if (slice_used > cfqq->allocated_slice)
+                       slice_used = cfqq->allocated_slice;
        }
 
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
+                               cfqq->nr_sectors);
        return slice_used;
 }
 
@@ -872,13 +905,19 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl;
+       unsigned int used_sl, charge_sl;
+       int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
+                       - cfqg->service_tree_idle.count;
+
+       BUG_ON(nr_sync < 0);
+       used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
 
-       used_sl = cfq_cfqq_slice_usage(cfqq);
+       if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+               charge_sl = cfqq->allocated_slice;
 
        /* Can't update vdisktime while group is on service tree */
        cfq_rb_erase(&cfqg->rb_node, st);
-       cfqg->vdisktime += cfq_scale_slice(used_sl, cfqg);
+       cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
@@ -889,6 +928,11 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                cfqg->saved_serving_prio = cfqd->serving_prio;
        } else
                cfqg->saved_workload_slice = 0;
+
+       cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
+                                       st->min_vdisktime);
+       blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
+                                               cfqq->nr_sectors);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
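For intuition, the charging introduced in cfq_group_served() above can be modelled by a small standalone sketch (illustrative only: the function names are invented, and the default blkio weight of 500 is an assumption matching blk-cgroup.h of this era; the fixed-point scaling inside cfq_scale_slice() is ignored):

#include <stdio.h>

/* Toy model of the charging rule in cfq_group_served(); not kernel code. */
static unsigned long charge_for_queue(unsigned long slice_used,
				      unsigned long allocated_slice,
				      int queue_is_sync, int group_has_sync)
{
	/* An async queue in a group with no sync queues is charged its
	 * full allocated slice, not just the time it actually used. */
	if (!queue_is_sync && !group_has_sync)
		return allocated_slice;
	return slice_used;
}

static unsigned long long scale_by_weight(unsigned long charge,
					  unsigned int weight)
{
	/* Roughly mirrors cfq_scale_slice(): a heavier group's virtual
	 * time advances more slowly for the same charge. */
	return (unsigned long long)charge * 500 / weight;
}

int main(void)
{
	/* Queue used 30 jiffies of a 100 jiffy slice in an async-only group. */
	unsigned long c = charge_for_queue(30, 100, 0, 0);	/* -> 100 */

	printf("charge=%lu vdisktime+=%llu\n", c, scale_by_weight(c, 1000));
	return 0;
}

Scaling the charge inversely with the group weight is what lets the vdisktime-ordered group service tree pick heavier groups again sooner than lighter ones.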
@@ -899,6 +943,12 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
        return NULL;
 }
 
+void
+cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
+{
+       cfqg_of_blkg(blkg)->weight = weight;
+}
+
 static struct cfq_group *
 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 {
@@ -907,9 +957,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
        void *key = cfqd;
        int i, j;
        struct cfq_rb_root *st;
+       struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
+       unsigned int major, minor;
 
        /* Do we need to take this reference */
-       if (!css_tryget(&blkcg->css))
+       if (!blkiocg_css_tryget(blkcg))
                return NULL;;
 
        cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
@@ -934,13 +986,15 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
        atomic_set(&cfqg->ref, 1);
 
        /* Add group onto cgroup list */
-       blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd);
+       sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+       blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+                                       MKDEV(major, minor));
 
        /* Add group on cfqd list */
        hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 
 done:
-       css_put(&blkcg->css);
+       blkiocg_css_put(blkcg);
        return cfqg;
 }
 
@@ -1069,6 +1123,33 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_rb_root *service_tree;
        int left;
        int new_cfqq = 1;
+       int group_changed = 0;
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (!cfqd->cfq_group_isolation
+           && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
+           && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
+               /* Move this cfq to root group */
+               cfq_log_cfqq(cfqd, cfqq, "moving to root group");
+               if (!RB_EMPTY_NODE(&cfqq->rb_node))
+                       cfq_group_service_tree_del(cfqd, cfqq->cfqg);
+               cfqq->orig_cfqg = cfqq->cfqg;
+               cfqq->cfqg = &cfqd->root_group;
+               atomic_inc(&cfqd->root_group.ref);
+               group_changed = 1;
+       } else if (!cfqd->cfq_group_isolation
+                  && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
+               /* cfqq is sequential now, move it back to its original group */
+               BUG_ON(cfqq->cfqg != &cfqd->root_group);
+               if (!RB_EMPTY_NODE(&cfqq->rb_node))
+                       cfq_group_service_tree_del(cfqd, cfqq->cfqg);
+               cfq_put_cfqg(cfqq->cfqg);
+               cfqq->cfqg = cfqq->orig_cfqg;
+               cfqq->orig_cfqg = NULL;
+               group_changed = 1;
+               cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
+       }
+#endif
 
        service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
                                                cfqq_type(cfqq), cfqd);
@@ -1139,7 +1220,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        rb_link_node(&cfqq->rb_node, parent, p);
        rb_insert_color(&cfqq->rb_node, &service_tree->rb);
        service_tree->count++;
-       if (add_front || !new_cfqq)
+       if ((add_front || !new_cfqq) && !group_changed)
                return;
        cfq_group_service_tree_add(cfqd, cfqq->cfqg);
 }
@@ -1434,6 +1515,9 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
 
+       /* Deny merge if bio and rq don't belong to same cfq group */
+       if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0))
+               return false;
        /*
         * Disallow merge of a sync bio into an async request.
         */
@@ -1459,8 +1543,10 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
+               cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
+               cfqq->nr_sectors = 0;
 
                cfq_clear_cfqq_wait_request(cfqq);
                cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1487,6 +1573,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                del_timer(&cfqd->idle_slice_timer);
 
        cfq_clear_cfqq_wait_request(cfqq);
+       cfq_clear_cfqq_wait_busy(cfqq);
+       cfq_clear_cfqq_wait_busy_done(cfqq);
 
        /*
         * store what was left of this slice, if the queue idled/timed out
@@ -1670,6 +1758,10 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
        if (!cfqq)
                return NULL;
 
+       /* If new queue belongs to different cfq_group, don't choose it */
+       if (cur_cfqq->cfqg != cfqq->cfqg)
+               return NULL;
+
        /*
         * It only makes sense to merge sync queues.
         */
@@ -1704,7 +1796,8 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                return false;
 
        /* We do for queues that were marked with idle window flag. */
-       if (cfq_cfqq_idle_window(cfqq))
+       if (cfq_cfqq_idle_window(cfqq) &&
+          !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
                return true;
 
        /*
@@ -1784,6 +1877,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight++;
+       cfqq->nr_sectors += blk_rq_sectors(rq);
 }
 
 /*
@@ -1966,11 +2060,24 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
                      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
 
-       if (cfqd->serving_type == ASYNC_WORKLOAD)
+       if (cfqd->serving_type == ASYNC_WORKLOAD) {
+               unsigned int tmp;
+
+               /*
+                * Async queues are currently system wide. Just taking
+                * proportion of queues within same group will lead to higher
+                * async ratio system wide as generally root group is going
+                * to have higher weight. A more accurate thing would be to
+                * calculate system wide async/sync ratio.
+                */
+               tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+               tmp = tmp/cfqd->busy_queues;
+               slice = min_t(unsigned, slice, tmp);
+
                /* async workload slice is scaled down according to
                 * the sync/async slice ratio. */
                slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
-       else
+       } else
                /* sync workload slice is at least 2 * cfq_slice_idle */
                slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
@@ -2024,7 +2131,8 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
        /*
         * The active queue has run out of time, expire it and select new.
         */
-       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
+       if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
+            && !cfq_cfqq_must_dispatch(cfqq))
                goto expire;
 
        /*
@@ -2260,7 +2368,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
        struct cfq_data *cfqd = cfqq->cfqd;
-       struct cfq_group *cfqg;
+       struct cfq_group *cfqg, *orig_cfqg;
 
        BUG_ON(atomic_read(&cfqq->ref) <= 0);
 
@@ -2271,6 +2379,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        BUG_ON(rb_first(&cfqq->sort_list));
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        cfqg = cfqq->cfqg;
+       orig_cfqg = cfqq->orig_cfqg;
 
        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
@@ -2280,6 +2389,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        kmem_cache_free(cfq_pool, cfqq);
        cfq_put_cfqg(cfqg);
+       if (orig_cfqg)
+               cfq_put_cfqg(orig_cfqg);
 }
 
 /*
@@ -2572,6 +2683,41 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfqq->pid = pid;
 }
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
+{
+       struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
+       struct cfq_data *cfqd = cic->key;
+       unsigned long flags;
+       struct request_queue *q;
+
+       if (unlikely(!cfqd))
+               return;
+
+       q = cfqd->queue;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       if (sync_cfqq) {
+               /*
+                * Drop reference to sync queue. A new sync queue will be
+                * assigned in new group upon arrival of a fresh request.
+                */
+               cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
+               cic_set_cfqq(cic, NULL, 1);
+               cfq_put_queue(sync_cfqq);
+       }
+
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void cfq_ioc_set_cgroup(struct io_context *ioc)
+{
+       call_for_each_cic(ioc, changed_cgroup);
+       ioc->cgroup_changed = 0;
+}
+#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
+
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
                     struct io_context *ioc, gfp_t gfp_mask)
@@ -2804,6 +2950,10 @@ out:
        if (unlikely(ioc->ioprio_changed))
                cfq_ioc_set_ioprio(ioc);
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (unlikely(ioc->cgroup_changed))
+               cfq_ioc_set_cgroup(ioc);
+#endif
        return cic;
 err_free:
        cfq_cic_free(cic);
@@ -2921,22 +3071,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if (!cfqq)
                return false;
 
-       if (cfq_slice_used(cfqq))
-               return true;
-
        if (cfq_class_idle(new_cfqq))
                return false;
 
        if (cfq_class_idle(cfqq))
                return true;
 
-       /* Allow preemption only if we are idling on sync-noidle tree */
-       if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
-           cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
-           new_cfqq->service_tree->count == 2 &&
-           RB_EMPTY_ROOT(&cfqq->sort_list))
-               return true;
-
        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
@@ -2944,6 +3084,19 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
                return true;
 
+       if (new_cfqq->cfqg != cfqq->cfqg)
+               return false;
+
+       if (cfq_slice_used(cfqq))
+               return true;
+
+       /* Allow preemption only if we are idling on sync-noidle tree */
+       if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+           cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
+           new_cfqq->service_tree->count == 2 &&
+           RB_EMPTY_ROOT(&cfqq->sort_list))
+               return true;
+
        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
@@ -3012,6 +3165,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
        if (cfqq == cfqd->active_queue) {
+               if (cfq_cfqq_wait_busy(cfqq)) {
+                       cfq_clear_cfqq_wait_busy(cfqq);
+                       cfq_mark_cfqq_wait_busy_done(cfqq);
+               }
                /*
                 * Remember that we saw a request from this process, but
                 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3102,7 +3259,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        unsigned long now;
 
        now = jiffies;
-       cfq_log_cfqq(cfqd, cfqq, "complete");
+       cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
 
        cfq_update_hw_tag(cfqd);
 
@@ -3130,6 +3287,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
+
+               /*
+                * If this queue consumed its slice and this is the last queue
+                * in the group, wait for the next request before we expire
+                * the queue.
+                */
+               if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+                       cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                       cfq_mark_cfqq_wait_busy(cfqq);
+               }
+
                /*
                 * Idling is not enabled on:
                 * - expired queues
@@ -3149,7 +3317,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                         * only if we processed at least one !rq_noidle request
                         */
                        if (cfqd->serving_type == SYNC_WORKLOAD
-                           || cfqd->noidle_tree_requires_idle)
+                           || cfqd->noidle_tree_requires_idle
+                           || cfqq->cfqg->nr_cfqq == 1)
                                cfq_arm_slice_timer(cfqd);
                }
        }
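The wait_busy machinery added here and in the earlier hunks (marking in cfq_completed_request(), wait_busy_done in cfq_rq_enqueued(), expiry in cfq_select_queue()) amounts to a three-state flow. A compressed, self-contained sketch with invented names:

#include <stdio.h>

/* Toy walk-through of the wait_busy / wait_busy_done flags; not kernel code. */
enum cfqq_state { CFQQ_IDLE, CFQQ_WAIT_BUSY, CFQQ_WAIT_BUSY_DONE };

static enum cfqq_state on_request_completed(enum cfqq_state s,
					    int slice_used, int last_in_group)
{
	/* Last queue in the group used up its slice: instead of expiring
	 * at once, extend slice_end by one idle period and wait for more IO. */
	if (slice_used && last_in_group)
		return CFQQ_WAIT_BUSY;
	return s;
}

static enum cfqq_state on_new_request(enum cfqq_state s)
{
	/* The awaited request arrived: remember it so the next selection
	 * pass can expire the queue and recompute fairness. */
	return s == CFQQ_WAIT_BUSY ? CFQQ_WAIT_BUSY_DONE : s;
}

static int should_expire(enum cfqq_state s)
{
	/* The selection path treats wait_busy_done like a used-up slice. */
	return s == CFQQ_WAIT_BUSY_DONE;
}

int main(void)
{
	enum cfqq_state s = CFQQ_IDLE;

	s = on_request_completed(s, 1, 1);
	s = on_new_request(s);
	printf("expire queue now? %s\n", should_expire(s) ? "yes" : "no");
	return 0;
}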
@@ -3434,6 +3603,11 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
                cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
+static void cfq_cfqd_free(struct rcu_head *head)
+{
+       kfree(container_of(head, struct cfq_data, rcu));
+}
+
 static void cfq_exit_queue(struct elevator_queue *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
@@ -3463,8 +3637,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
        cfq_shutdown_timer_wq(cfqd);
 
        /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
-       synchronize_rcu();
-       kfree(cfqd);
+       call_rcu(&cfqd->rcu, cfq_cfqd_free);
 }
 
 static void *cfq_init_queue(struct request_queue *q)
@@ -3496,7 +3669,8 @@ static void *cfq_init_queue(struct request_queue *q)
         * to make sure that cfq_put_cfqg() does not try to kfree root group
         */
        atomic_set(&cfqg->ref, 1);
-       blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd);
+       blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
+                                       0);
 #endif
        /*
         * Not strictly needed (since RB_ROOT just clears the node and we
@@ -3535,8 +3709,10 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
        cfqd->cfq_latency = 1;
+       cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
        cfqd->last_end_sync_rq = jiffies;
+       INIT_RCU_HEAD(&cfqd->rcu);
        return cfqd;
 }
 
@@ -3605,6 +3781,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -3637,6 +3814,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -3653,6 +3831,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
        CFQ_ATTR(low_latency),
+       CFQ_ATTR(group_isolation),
        __ATTR_NULL
 };
 
@@ -3682,6 +3861,17 @@ static struct elevator_type iosched_cfq = {
        .elevator_owner =       THIS_MODULE,
 };
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static struct blkio_policy_type blkio_policy_cfq = {
+       .ops = {
+               .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
+               .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
+       },
+};
+#else
+static struct blkio_policy_type blkio_policy_cfq;
+#endif
+
 static int __init cfq_init(void)
 {
        /*
@@ -3696,6 +3886,7 @@ static int __init cfq_init(void)
                return -ENOMEM;
 
        elv_register(&iosched_cfq);
+       blkio_policy_register(&blkio_policy_cfq);
 
        return 0;
 }
@@ -3703,6 +3894,7 @@ static int __init cfq_init(void)
 static void __exit cfq_exit(void)
 {
        DECLARE_COMPLETION_ONSTACK(all_gone);
+       blkio_policy_unregister(&blkio_policy_cfq);
        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */