sched: preparatory code movement
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6c091d6..de4250c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -62,16 +62,6 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
 unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
- * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
- *
- * This option delays the preemption effects of decoupled workloads
- * and reduces their over-scheduling. Synchronous workloads will still
- * have immediate wakeup/sleep latencies.
- */
-unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
-
-/*
  * SCHED_OTHER wake-up granularity.
  * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
@@ -87,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  * CFS operations on generic schedulable entities:
  */
 
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+       return container_of(se, struct task_struct, se);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -98,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)     (!se->my_q)
 
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+               for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+       return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+       return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+       return grp->my_q;
+}
+
+/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+       return cfs_rq->tg->cfs_rq[this_cpu];
+}
+
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+       list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+       if (se->cfs_rq == pse->cfs_rq)
+               return 1;
+
+       return 0;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+       return se->parent;
+}
+
 #else  /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -107,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 
 #define entity_is_task(se)     1
 
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#define for_each_sched_entity(se) \
+               for (; se; se = NULL)
 
-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 {
-       return container_of(se, struct task_struct, se);
+       return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+       struct task_struct *p = task_of(se);
+       struct rq *rq = task_rq(p);
+
+       return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+       return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+       return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+               for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+       return 1;
 }
 
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+       return NULL;
+}
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -175,8 +254,15 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
-       if (leftmost)
+       if (leftmost) {
                cfs_rq->rb_leftmost = &se->run_node;
+               /*
+                * maintain cfs_rq->min_vruntime to be a monotonic increasing
+                * value tracking the leftmost vruntime in the tree.
+                */
+               cfs_rq->min_vruntime =
+                       max_vruntime(cfs_rq->min_vruntime, se->vruntime);
+       }
 
        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -184,8 +270,24 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       if (cfs_rq->rb_leftmost == &se->run_node)
-               cfs_rq->rb_leftmost = rb_next(&se->run_node);
+       if (cfs_rq->rb_leftmost == &se->run_node) {
+               struct rb_node *next_node;
+               struct sched_entity *next;
+
+               next_node = rb_next(&se->run_node);
+               cfs_rq->rb_leftmost = next_node;
+
+               if (next_node) {
+                       next = rb_entry(next_node,
+                                       struct sched_entity, run_node);
+                       cfs_rq->min_vruntime =
+                               max_vruntime(cfs_rq->min_vruntime,
+                                            next->vruntime);
+               }
+       }
+
+       if (cfs_rq->next == se)
+               cfs_rq->next = NULL;
 
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
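[note] Taken together with the __enqueue_entity() hunk above, min_vruntime is now advanced from the enqueue/dequeue paths rather than from __update_curr() (whose copy of this logic is removed further down); the max_vruntime() clamp keeps it monotonic so a newly placed entity can never gain credit. A standalone sketch of that wrap-safe clamp, with illustrative numbers (not part of the patch):

static inline unsigned long long vruntime_floor(unsigned long long floor,
                                                unsigned long long vruntime)
{
        /* signed compare of the difference handles u64 wrap-around */
        long long delta = (long long)(vruntime - floor);

        return delta > 0 ? vruntime : floor;
}

/*
 * floor = vruntime_floor(1000, 1200);   -> 1200  (leftmost moved forward)
 * floor = vruntime_floor(1200,  900);   -> 1200  (never moves backwards)
 */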
@@ -202,17 +304,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-       struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-       struct sched_entity *se = NULL;
-       struct rb_node *parent;
+       struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
-       while (*link) {
-               parent = *link;
-               se = rb_entry(parent, struct sched_entity, run_node);
-               link = &parent->rb_right;
-       }
+       if (!last)
+               return NULL;
 
-       return se;
+       return rb_entry(last, struct sched_entity, run_node);
 }
 
 /**************************************************************
@@ -265,12 +362,8 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u64 slice = __sched_period(cfs_rq->nr_running);
-
-       slice *= se->load.weight;
-       do_div(slice, cfs_rq->load.weight);
-
-       return slice;
+       return calc_delta_mine(__sched_period(cfs_rq->nr_running),
+                              se->load.weight, &cfs_rq->load);
 }
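[note] sched_slice() now reuses calc_delta_mine() instead of the open-coded do_div(); the result is still the entity's weight-proportional share of the scheduling period. A rough standalone model of that share, using plain integer math instead of the kernel's inverse-weight fixed point (numbers are examples only):

#include <stdio.h>

/* conceptual: slice = period * weight / total_weight */
static unsigned long long slice_ns(unsigned long long period_ns,
                                   unsigned long weight,
                                   unsigned long total_weight)
{
        return period_ns * weight / total_weight;
}

int main(void)
{
        /* e.g. a 20 ms period, one nice-0 entity (weight 1024) out of 3072 */
        printf("%llu\n", slice_ns(20000000ULL, 1024, 3072));   /* 6666666 ns */
        return 0;
}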
 
 /*
@@ -288,11 +381,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
        return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-       return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -308,7 +396,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
 {
        unsigned long delta_exec_weighted;
-       u64 vruntime;
 
        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -320,19 +407,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
                                                        &curr->load);
        }
        curr->vruntime += delta_exec_weighted;
-
-       /*
-        * maintain cfs_rq->min_vruntime to be a monotonic increasing
-        * value tracking the leftmost vruntime in the tree.
-        */
-       if (first_fair(cfs_rq)) {
-               vruntime = min_vruntime(curr->vruntime,
-                               __pick_next_entity(cfs_rq)->vruntime);
-       } else
-               vruntime = curr->vruntime;
-
-       cfs_rq->min_vruntime =
-               max_vruntime(cfs_rq->min_vruntime, vruntime);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -498,16 +572,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
        u64 vruntime;
 
-       vruntime = cfs_rq->min_vruntime;
-
-       if (sched_feat(TREE_AVG)) {
-               struct sched_entity *last = __pick_last_entity(cfs_rq);
-               if (last) {
-                       vruntime += last->vruntime;
-                       vruntime >>= 1;
-               }
-       } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-               vruntime += sched_vslice(cfs_rq)/2;
+       if (first_fair(cfs_rq)) {
+               vruntime = min_vruntime(cfs_rq->min_vruntime,
+                               __pick_next_entity(cfs_rq)->vruntime);
+       } else
+               vruntime = cfs_rq->min_vruntime;
 
        /*
         * The 'current' period is already promised to the current tasks,
@@ -520,8 +589,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
        if (!initial) {
                /* sleeps upto a single latency don't count. */
-               if (sched_feat(NEW_FAIR_SLEEPERS))
-                       vruntime -= sysctl_sched_latency;
+               if (sched_feat(NEW_FAIR_SLEEPERS)) {
+                       if (sched_feat(NORMALIZED_SLEEPER))
+                               vruntime -= calc_delta_fair(sysctl_sched_latency,
+                                               &cfs_rq->load);
+                       else
+                               vruntime -= sysctl_sched_latency;
+               }
 
                /* ensure we never gain time by being placed backwards. */
                vruntime = max_vruntime(se->vruntime, vruntime);
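[note] The new NORMALIZED_SLEEPER feature scales the sleeper credit by the runqueue weight: calc_delta_fair(delta, lw) computes delta * NICE_0_LOAD / lw->weight, so the bonus shrinks as the queue gets busier instead of always being a full sysctl_sched_latency. A rough model with example numbers (illustrative, not the kernel's fixed-point arithmetic):

#define NICE_0_LOAD 1024UL              /* load weight of a nice-0 task */

static unsigned long long sleeper_credit_ns(unsigned long long latency_ns,
                                            unsigned long rq_weight)
{
        return latency_ns * NICE_0_LOAD / rq_weight;
}

/*
 * With a 20 ms latency:
 *   rq_weight 1024 (one nice-0 task queued)   -> 20 ms credit
 *   rq_weight 4096 (four nice-0 tasks queued) ->  5 ms credit
 */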
@@ -550,6 +624,21 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
        account_entity_enqueue(cfs_rq, se);
 }
 
+static void update_avg(u64 *avg, u64 sample)
+{
+       s64 diff = sample - *avg;
+       *avg += diff >> 3;
+}
+
+static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       if (!se->last_wakeup)
+               return;
+
+       update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
+       se->last_wakeup = 0;
+}
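[note] update_avg() is a 1/8-gain exponential moving average (avg += (sample - avg) / 8), so se->avg_overlap tracks how long the task typically stays runnable between being woken and going back to sleep; wake_affine() below compares it against sysctl_sched_migration_cost. A standalone illustration:

#include <stdio.h>

/* same 1/8-gain EWMA as update_avg() above, copied for illustration */
static void ewma_update(long long *avg, long long sample)
{
        *avg += (sample - *avg) >> 3;
}

int main(void)
{
        long long avg_overlap = 0;
        int i;

        /* a task that keeps running ~800 ns between wakeup and sleep */
        for (i = 0; i < 32; i++)
                ewma_update(&avg_overlap, 800);
        printf("%lld\n", avg_overlap);          /* converges towards 800 */
        return 0;
}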
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -560,6 +649,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
+               update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);
@@ -621,12 +711,28 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
+static struct sched_entity *
+pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       if (!cfs_rq->next)
+               return se;
+
+       if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
+               return se;
+
+       return cfs_rq->next;
+}
+
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *se = NULL;
 
        if (first_fair(cfs_rq)) {
                se = __pick_next_entity(cfs_rq);
+               se = pick_next(cfs_rq, se);
                set_next_entity(cfs_rq, se);
        }
 
@@ -682,103 +788,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  * CFS operations on tasks:
  */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* Walk up scheduling entities hierarchy */
-#define for_each_sched_entity(se) \
-               for (; se; se = se->parent)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-       return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-       return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-       return grp->my_q;
-}
-
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-       return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-       list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-
-/* Do the two (enqueued) entities belong to the same group ? */
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-       if (se->cfs_rq == pse->cfs_rq)
-               return 1;
-
-       return 0;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-       return se->parent;
-}
-
-#define GROUP_IMBALANCE_PCT    20
-
-#else  /* CONFIG_FAIR_GROUP_SCHED */
-
-#define for_each_sched_entity(se) \
-               for (; se; se = NULL)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-       return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-       struct task_struct *p = task_of(se);
-       struct rq *rq = task_rq(p);
-
-       return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-       return NULL;
-}
-
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-       return &cpu_rq(this_cpu)->cfs;
-}
-
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-               for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-       return 1;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-       return NULL;
-}
-
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
@@ -824,26 +833,15 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p)
 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se,
-                           *topse = NULL;      /* Highest schedulable entity */
-       int incload = 1;
+       struct sched_entity *se = &p->se;
 
        for_each_sched_entity(se) {
-               topse = se;
-               if (se->on_rq) {
-                       incload = 0;
+               if (se->on_rq)
                        break;
-               }
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }
-       /* Increment cpu load if we just enqueued the first task of a group on
-        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
-        * at the highest grouping level.
-        */
-       if (incload)
-               inc_cpu_load(rq, topse->load.weight);
 
        hrtick_start_fair(rq, rq->curr);
 }
@@ -856,28 +854,16 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se,
-                           *topse = NULL;      /* Highest schedulable entity */
-       int decload = 1;
+       struct sched_entity *se = &p->se;
 
        for_each_sched_entity(se) {
-               topse = se;
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
-               if (cfs_rq->load.weight) {
-                       if (parent_entity(se))
-                               decload = 0;
+               if (cfs_rq->load.weight)
                        break;
-               }
                sleep = 1;
        }
-       /* Decrement cpu load if we just dequeued the last task of a group on
-        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
-        * at the highest grouping level.
-        */
-       if (decload)
-               dec_cpu_load(rq, topse->load.weight);
 
        hrtick_start_fair(rq, rq->curr);
 }
@@ -915,7 +901,7 @@ static void yield_task_fair(struct rq *rq)
        /*
         * Already in the rightmost position?
         */
-       if (unlikely(rightmost->vruntime < se->vruntime))
+       if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
                return;
 
        /*
@@ -979,100 +965,178 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
-static int select_task_rq_fair(struct task_struct *p, int sync)
+
+static const struct sched_class fair_sched_class;
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+           struct task_struct *p, int prev_cpu, int this_cpu, int sync,
+           int idx, unsigned long load, unsigned long this_load,
+           unsigned int imbalance)
 {
-       int cpu, this_cpu;
-       struct rq *rq;
-       struct sched_domain *sd, *this_sd = NULL;
-       int new_cpu;
+       struct task_struct *curr = this_rq->curr;
+       unsigned long tl = this_load;
+       unsigned long tl_per_task;
+
+       if (!(this_sd->flags & SD_WAKE_AFFINE))
+               return 0;
+
+       /*
+        * If the currently running task will sleep within
+        * a reasonable amount of time then attract this newly
+        * woken task:
+        */
+       if (sync && curr->sched_class == &fair_sched_class) {
+               if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
+                               p->se.avg_overlap < sysctl_sched_migration_cost)
+                       return 1;
+       }
 
-       cpu      = task_cpu(p);
-       rq       = task_rq(p);
-       this_cpu = smp_processor_id();
-       new_cpu  = cpu;
+       schedstat_inc(p, se.nr_wakeups_affine_attempts);
+       tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       if (cpu == this_cpu)
-               goto out_set_cpu;
+       /*
+        * If sync wakeup then subtract the (maximum possible)
+        * effect of the currently running task from the load
+        * of the current CPU:
+        */
+       if (sync)
+               tl -= current->se.load.weight;
+
+       if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
+                       100*(tl + p->se.load.weight) <= imbalance*load) {
+               /*
+                * This domain has SD_WAKE_AFFINE and
+                * p is cache cold in this domain, and
+                * there is no bad imbalance.
+                */
+               schedstat_inc(this_sd, ttwu_move_affine);
+               schedstat_inc(p, se.nr_wakeups_affine);
+
+               return 1;
+       }
+       return 0;
+}
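[note] The extracted wake_affine() also gains a fast path for sync wakeups: when both the waker's and the wakee's avg_overlap are below sysctl_sched_migration_cost (500 us by default), the wakee is pulled to the waking CPU without consulting the load figures. A minimal sketch of that check (helper name and constants are illustrative):

#include <stdbool.h>

static bool sync_wakeup_affine(unsigned long long waker_overlap_ns,
                               unsigned long long wakee_overlap_ns)
{
        const unsigned long long migration_cost_ns = 500000;   /* 0.5 ms default */

        return waker_overlap_ns < migration_cost_ns &&
               wakee_overlap_ns < migration_cost_ns;
}

/*
 * sync_wakeup_affine(120000, 80000) -> true: both tasks tend to sleep again
 * quickly, so stacking them on one CPU keeps the shared cache warm.
 */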
+
+static int select_task_rq_fair(struct task_struct *p, int sync)
+{
+       struct sched_domain *sd, *this_sd = NULL;
+       int prev_cpu, this_cpu, new_cpu;
+       unsigned long load, this_load;
+       struct rq *rq, *this_rq;
+       unsigned int imbalance;
+       int idx;
+
+       prev_cpu        = task_cpu(p);
+       rq              = task_rq(p);
+       this_cpu        = smp_processor_id();
+       this_rq         = cpu_rq(this_cpu);
+       new_cpu         = prev_cpu;
 
+       /*
+        * 'this_sd' is the first domain that both
+        * this_cpu and prev_cpu are present in:
+        */
        for_each_domain(this_cpu, sd) {
-               if (cpu_isset(cpu, sd->span)) {
+               if (cpu_isset(prev_cpu, sd->span)) {
                        this_sd = sd;
                        break;
                }
        }
 
        if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-               goto out_set_cpu;
+               goto out;
 
        /*
         * Check for affine wakeup and passive balancing possibilities.
         */
-       if (this_sd) {
-               int idx = this_sd->wake_idx;
-               unsigned int imbalance;
-               unsigned long load, this_load;
-
-               imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-               load = source_load(cpu, idx);
-               this_load = target_load(this_cpu, idx);
-
-               new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-               if (this_sd->flags & SD_WAKE_AFFINE) {
-                       unsigned long tl = this_load;
-                       unsigned long tl_per_task;
-
-                       /*
-                        * Attract cache-cold tasks on sync wakeups:
-                        */
-                       if (sync && !task_hot(p, rq->clock, this_sd))
-                               goto out_set_cpu;
-
-                       schedstat_inc(p, se.nr_wakeups_affine_attempts);
-                       tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-                       /*
-                        * If sync wakeup then subtract the (maximum possible)
-                        * effect of the currently running task from the load
-                        * of the current CPU:
-                        */
-                       if (sync)
-                               tl -= current->se.load.weight;
-
-                       if ((tl <= load &&
-                               tl + target_load(cpu, idx) <= tl_per_task) ||
-                              100*(tl + p->se.load.weight) <= imbalance*load) {
-                               /*
-                                * This domain has SD_WAKE_AFFINE and
-                                * p is cache cold in this domain, and
-                                * there is no bad imbalance.
-                                */
-                               schedstat_inc(this_sd, ttwu_move_affine);
-                               schedstat_inc(p, se.nr_wakeups_affine);
-                               goto out_set_cpu;
-                       }
-               }
+       if (!this_sd)
+               goto out;
 
-               /*
-                * Start passive balancing when half the imbalance_pct
-                * limit is reached.
-                */
-               if (this_sd->flags & SD_WAKE_BALANCE) {
-                       if (imbalance*this_load <= 100*load) {
-                               schedstat_inc(this_sd, ttwu_move_balance);
-                               schedstat_inc(p, se.nr_wakeups_passive);
-                               goto out_set_cpu;
-                       }
+       idx = this_sd->wake_idx;
+
+       imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+       load = source_load(prev_cpu, idx);
+       this_load = target_load(this_cpu, idx);
+
+       if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+                                    load, this_load, imbalance))
+               return this_cpu;
+
+       if (prev_cpu == this_cpu)
+               goto out;
+
+       /*
+        * Start passive balancing when half the imbalance_pct
+        * limit is reached.
+        */
+       if (this_sd->flags & SD_WAKE_BALANCE) {
+               if (imbalance*this_load <= 100*load) {
+                       schedstat_inc(this_sd, ttwu_move_balance);
+                       schedstat_inc(p, se.nr_wakeups_passive);
+                       return this_cpu;
                }
        }
 
-       new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
-out_set_cpu:
+out:
        return wake_idle(new_cpu, p);
 }
 #endif /* CONFIG_SMP */
 
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+       unsigned long gran = sysctl_sched_wakeup_granularity;
+
+       /*
+        * More easily preempt - nice tasks, while not making
+        * it harder for + nice tasks.
+        */
+       if (unlikely(se->load.weight > NICE_0_LOAD))
+               gran = calc_delta_fair(gran, &se->load);
+
+       return gran;
+}
+
+/*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+       s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+       if (vdiff < 0)
+               return -1;
+
+       gran = wakeup_gran(curr);
+       if (vdiff > gran)
+               return 1;
+
+       return 0;
+}
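[note] wakeup_preempt_entity() turns the diagram above into three results: -1 when 'se' is to the right of 'curr' (no preemption), 0 when it leads by less than the wakeup granularity, and 1 when it leads by more (preempt). A standalone replica with concrete numbers, assuming a flat 10 ms granularity (the kernel scales it per wakeup_gran() above):

#include <stdio.h>

static int preempt_decision(long long curr_vruntime, long long se_vruntime)
{
        const long long gran = 10000000;        /* 10 ms, in ns */
        long long vdiff = curr_vruntime - se_vruntime;

        if (vdiff < 0)
                return -1;      /* se is to the right of curr */
        if (vdiff > gran)
                return 1;       /* se leads by more than the granularity */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               preempt_decision(50000000, 55000000),    /* -1 */
               preempt_decision(50000000, 44000000),    /*  0 */
               preempt_decision(50000000, 30000000));   /*  1 */
        return 0;
}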
+
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+       int depth = 0;
+
+       for_each_sched_entity(se)
+               depth++;
+
+       return depth;
+}
 
 /*
  * Preempt the current task with a newly woken task if needed:
@@ -1082,7 +1146,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        struct sched_entity *se = &curr->se, *pse = &p->se;
-       unsigned long gran;
+       int se_depth, pse_depth;
 
        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
@@ -1090,6 +1154,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                resched_task(curr);
                return;
        }
+
+       se->last_wakeup = se->sum_exec_runtime;
+       if (unlikely(se == pse))
+               return;
+
+       cfs_rq_of(pse)->next = pse;
+
        /*
         * Batch tasks do not preempt (their preemption is driven by
         * the tick):
@@ -1100,20 +1171,33 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
-       while (!is_same_group(se, pse)) {
+       /*
+        * preemption test can be made between sibling entities who are in the
+        * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+        * both tasks until we find their ancestors who are siblings of common
+        * parent.
+        */
+
+       /* First walk up until both entities are at same depth */
+       se_depth = depth_se(se);
+       pse_depth = depth_se(pse);
+
+       while (se_depth > pse_depth) {
+               se_depth--;
                se = parent_entity(se);
+       }
+
+       while (pse_depth > se_depth) {
+               pse_depth--;
                pse = parent_entity(pse);
        }
 
-       gran = sysctl_sched_wakeup_granularity;
-       /*
-        * More easily preempt - nice tasks, while not making
-        * it harder for + nice tasks.
-        */
-       if (unlikely(se->load.weight > NICE_0_LOAD))
-               gran = calc_delta_fair(gran, &se->load);
+       while (!is_same_group(se, pse)) {
+               se = parent_entity(se);
+               pse = parent_entity(pse);
+       }
 
-       if (pse->vruntime + gran < se->vruntime)
+       if (wakeup_preempt_entity(se, pse) == 1)
                resched_task(curr);
 }
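[note] The depth equalisation followed by the lock-step ascent is the usual lowest-common-ancestor walk for parent-linked hierarchies; it guarantees is_same_group() is only ever evaluated on entities that sit in the same cfs_rq. A generic sketch of the same idea (the node type is illustrative, not a kernel structure):

struct node { struct node *parent; };

static int depth(struct node *n)
{
        int d = 0;

        for (; n; n = n->parent)
                d++;
        return d;
}

/*
 * Bring both cursors to the same depth; callers then ascend both in
 * lock-step until they share a parent.
 */
static void equalize_depth(struct node **a, struct node **b)
{
        int da = depth(*a), db = depth(*b);

        while (da > db) { *a = (*a)->parent; da--; }
        while (db > da) { *b = (*b)->parent; db--; }
}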
 
@@ -1166,13 +1250,22 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
-       struct task_struct *p;
+       struct task_struct *p = NULL;
+       struct sched_entity *se;
 
        if (!curr)
                return NULL;
 
-       p = rb_entry(curr, struct task_struct, se.run_node);
-       cfs_rq->rb_load_balance_curr = rb_next(curr);
+       /* Skip over entities that are not tasks */
+       do {
+               se = rb_entry(curr, struct sched_entity, run_node);
+               curr = rb_next(curr);
+       } while (curr && !entity_is_task(se));
+
+       cfs_rq->rb_load_balance_curr = curr;
+
+       if (entity_is_task(se))
+               p = task_of(se);
 
        return p;
 }
@@ -1191,6 +1284,25 @@ static struct task_struct *load_balance_next_fair(void *arg)
        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+{
+       struct sched_entity *curr;
+       struct task_struct *p;
+
+       if (!cfs_rq->nr_running || !first_fair(cfs_rq))
+               return MAX_PRIO;
+
+       curr = cfs_rq->curr;
+       if (!curr)
+               curr = __pick_next_entity(cfs_rq);
+
+       p = task_of(curr);
+
+       return p->prio;
+}
+#endif
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
@@ -1200,45 +1312,28 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        struct cfs_rq *busy_cfs_rq;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;
-       unsigned long load_moved;
 
        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;
 
        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
-               struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
-               unsigned long maxload, task_load, group_weight;
-               unsigned long thisload, per_task_load;
-               struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];
-
-               task_load = busy_cfs_rq->load.weight;
-               group_weight = se->load.weight;
+               struct cfs_rq *this_cfs_rq;
+               long imbalance;
+               unsigned long maxload;
 
-               /*
-                * 'group_weight' is contributed by tasks of total weight
-                * 'task_load'. To move 'rem_load_move' worth of weight only,
-                * we need to move a maximum task load of:
-                *
-                *      maxload = (remload / group_weight) * task_load;
-                */
-               maxload = (rem_load_move * task_load) / group_weight;
+               this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-               if (!maxload || !task_load)
+               imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
+               /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+               if (imbalance <= 0)
                        continue;
 
-               per_task_load = task_load / busy_cfs_rq->nr_running;
-               /*
-                * balance_tasks will try to forcibly move atleast one task if
-                * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
-                * maxload is less than GROUP_IMBALANCE_FUZZ% the per_task_load.
-                */
-                if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
-                       continue;
+               /* Don't pull more than imbalance/2 */
+               imbalance /= 2;
+               maxload = min(rem_load_move, imbalance);
 
-               /* Disable priority-based load balance */
-               *this_best_prio = 0;
-               thisload = this_cfs_rq->load.weight;
+               *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
 # define maxload rem_load_move
 #endif
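[note] With CONFIG_FAIR_GROUP_SCHED the amount pulled for each group is now bounded by the weight difference between the busiest CPU's and this CPU's cfs_rq for that group, halved so one pass does not overshoot the balance point, and further capped by the load the balance run still wants to move. A standalone sketch with example numbers (names are illustrative):

static unsigned long group_maxload(unsigned long busy_weight,
                                   unsigned long this_weight,
                                   unsigned long rem_load_move)
{
        long imbalance = (long)busy_weight - (long)this_weight;

        if (imbalance <= 0)
                return 0;               /* busiest is not actually heavier */

        imbalance /= 2;                 /* don't pull past the balance point */

        return rem_load_move < (unsigned long)imbalance ?
                        rem_load_move : (unsigned long)imbalance;
}

/* e.g. busy_weight 3072, this_weight 1024, rem_load_move 4096 -> pull 1024 */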
@@ -1247,33 +1342,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
-               load_moved = balance_tasks(this_rq, this_cpu, busiest,
+               rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
                                               maxload, sd, idle, all_pinned,
                                               this_best_prio,
                                               &cfs_rq_iterator);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-               /*
-                * load_moved holds the task load that was moved. The
-                * effective (group) weight moved would be:
-                *      load_moved_eff = load_moved/task_load * group_weight;
-                */
-               load_moved = (group_weight * load_moved) / task_load;
-
-               /* Adjust shares on both cpus to reflect load_moved */
-               group_weight -= load_moved;
-               set_se_shares(se, group_weight);
-
-               se = busy_cfs_rq->tg->se[this_cpu];
-               if (!thisload)
-                       group_weight = load_moved;
-               else
-                       group_weight = se->load.weight + load_moved;
-               set_se_shares(se, group_weight);
-#endif
-
-               rem_load_move -= load_moved;
-
                if (rem_load_move <= 0)
                        break;
        }
@@ -1403,6 +1476,16 @@ static void set_curr_task_fair(struct rq *rq)
                set_next_entity(cfs_rq_of(se), se);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+       struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+       update_curr(cfs_rq);
+       place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
@@ -1431,6 +1514,10 @@ static const struct sched_class fair_sched_class = {
 
        .prio_changed           = prio_changed_fair,
        .switched_to            = switched_to_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       .moved_group            = moved_group_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -1438,9 +1525,6 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 {
        struct cfs_rq *cfs_rq;
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-       print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
-#endif
        rcu_read_lock();
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);