sched: Remove sysctl.sched_features
diff --git a/kernel/sched.c b/kernel/sched.c
index 6244d24..68db5a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -64,7 +64,6 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kprobes.h>
 #include <linux/delayacct.h>
-#include <linux/reciprocal_div.h>
 #include <linux/unistd.h>
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
-#ifdef CONFIG_SMP
-
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
-/*
- * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
- * Since cpu_power is a 'constant', we can use a reciprocal divide.
- */
-static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
-{
-       return reciprocal_divide(load, sg->reciprocal_cpu_power);
-}
-
-/*
- * Each time a sched group cpu_power is changed,
- * we must compute its reciprocal value
- */
-static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
-{
-       sg->__cpu_power += val;
-       sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
-}
-#endif
-
 static inline int rt_policy(int policy)
 {
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -318,12 +293,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -334,6 +309,8 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -341,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD  (2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -401,13 +377,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-       return 1;
-}
-#endif
-
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -537,14 +506,6 @@ struct root_domain {
 #ifdef CONFIG_SMP
        struct cpupri cpupri;
 #endif
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-       /*
-        * Preferred wake up cpu nominated by sched_mc balance that will be
-        * used when most cpus are idle in the system indicating overall very
-        * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
-        */
-       unsigned int sched_mc_preferred_wakeup_cpu;
-#endif
 };
 
 /*
@@ -574,14 +535,12 @@ struct rq {
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-       unsigned long last_tick_seen;
        unsigned char in_nohz_recently;
 #endif
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;
-       u64 nr_migrations_in;
 
        struct cfs_rq cfs;
        struct rt_rq rt;
@@ -627,6 +586,11 @@ struct rq {
 
        struct task_struct *migration_thread;
        struct list_head migration_queue;
+
+       u64 rt_avg;
+       u64 age_stamp;
+       u64 idle_stamp;
+       u64 avg_idle;
 #endif
 
        /* calc_load related fields */
@@ -666,9 +630,10 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-       rq->curr->sched_class->check_preempt_curr(rq, p, sync);
+       rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -712,20 +677,15 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-       int cpu = get_cpu();
-       struct rq *rq = cpu_rq(cpu);
-       int ret;
-
-       ret = spin_is_locked(&rq->lock);
-       put_cpu();
-       return ret;
+       return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -812,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        if (!sched_feat_names[i])
                return -EINVAL;
 
-       filp->f_pos += cnt;
+       *ppos += cnt;
 
        return cnt;
 }
@@ -822,7 +782,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
        return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
@@ -863,6 +823,14 @@ unsigned int sysctl_sched_shares_ratelimit = 250000;
 unsigned int sysctl_sched_shares_thresh = 4;
 
 /*
+ * period over which we average the RT time consumption, measured
+ * in ms.
+ *
+ * default: 1s
+ */
+const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
@@ -1280,12 +1248,37 @@ void wake_up_idle_cpu(int cpu)
 }
 #endif /* CONFIG_NO_HZ */
 
+static u64 sched_avg_period(void)
+{
+       return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
+}
+
+static void sched_avg_update(struct rq *rq)
+{
+       s64 period = sched_avg_period();
+
+       while ((s64)(rq->clock - rq->age_stamp) > period) {
+               rq->age_stamp += period;
+               rq->rt_avg /= 2;
+       }
+}
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+       rq->rt_avg += rt_delta;
+       sched_avg_update(rq);
+}
+
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
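
The rq->rt_avg / rq->age_stamp pair added above keeps a half-life average of RT runtime: sched_rt_avg_update() accumulates RT time and sched_avg_update() halves the accumulator once per half averaging window (500ms with the default sysctl_sched_time_avg of 1s). A minimal user-space sketch of that decay, with made-up tick and RT-consumption values purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL
#define MSEC_PER_SEC  1000ULL

/* Mirrors sched_avg_period(): half of the 1s default averaging window. */
static uint64_t avg_period(void)
{
        return MSEC_PER_SEC * NSEC_PER_MSEC / 2;
}

int main(void)
{
        uint64_t rt_avg = 0, age_stamp = 0, clock = 0;
        int tick;

        /* Pretend RT tasks burn 100ms out of every 250ms interval. */
        for (tick = 1; tick <= 8; tick++) {
                clock += 250 * NSEC_PER_MSEC;
                rt_avg += 100 * NSEC_PER_MSEC;  /* sched_rt_avg_update() */

                /* sched_avg_update(): age the accumulator by halving. */
                while ((int64_t)(clock - age_stamp) > (int64_t)avg_period()) {
                        age_stamp += avg_period();
                        rt_avg /= 2;
                }
                printf("t=%4llums rt_avg=%llums\n",
                       (unsigned long long)(clock / NSEC_PER_MSEC),
                       (unsigned long long)(rt_avg / NSEC_PER_MSEC));
        }
        return 0;
}

scale_rt_power() further down turns this average into the fraction of the window that was left over for fair tasks.
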
@@ -1496,8 +1489,65 @@ static int tg_nop(struct task_group *tg, void *data)
 #endif
 
 #ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+       return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return max(rq->cpu_load[type-1], total);
+}
+
+static struct sched_group *group_of(int cpu)
+{
+       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+       if (!sd)
+               return NULL;
+
+       return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+       struct sched_group *group = group_of(cpu);
+
+       if (!group)
+               return SCHED_LOAD_SCALE;
+
+       return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
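
weighted_cpuload(), source_load(), target_load() and power_of() are hoisted here, ahead of their new callers; the LB_BIAS behaviour itself is unchanged. For a history index type > 0, a migration source reports min(cpu_load[type-1], current weight) while a migration target reports max() of the same pair. With an assumed decayed cpu_load[0] of 2048 and an instantaneous load.weight of 1024, the same CPU looks like 1024 when treated as a source and 2048 when treated as a target, biasing the balancer toward leaving load where it already is. power_of() falls back to SCHED_LOAD_SCALE when no sched_domain is attached to the CPU.
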
@@ -1515,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-       unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1529,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
                                    unsigned long sd_shares,
                                    unsigned long sd_rq_weight,
-                                   struct update_shares_data *usd)
+                                   unsigned long *usd_rq_weight)
 {
        unsigned long shares, rq_weight;
        int boost = 0;
 
-       rq_weight = usd->rq_weight[cpu];
+       rq_weight = usd_rq_weight[cpu];
        if (!rq_weight) {
                boost = 1;
                rq_weight = NICE_0_LOAD;
@@ -1569,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
        unsigned long weight, rq_weight = 0, shares = 0;
-       struct update_shares_data *usd;
+       unsigned long *usd_rq_weight;
        struct sched_domain *sd = data;
        unsigned long flags;
        int i;
@@ -1578,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
                return 0;
 
        local_irq_save(flags);
-       usd = &__get_cpu_var(update_shares_data);
+       usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
        for_each_cpu(i, sched_domain_span(sd)) {
                weight = tg->cfs_rq[i]->load.weight;
-               usd->rq_weight[i] = weight;
+               usd_rq_weight[i] = weight;
 
                /*
                 * If there are currently no tasks on the cpu pretend there
@@ -1603,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
                shares = tg->shares;
 
        for_each_cpu(i, sched_domain_span(sd))
-               update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+               update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
        local_irq_restore(flags);
 
@@ -1682,6 +1728,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all
@@ -1945,14 +1993,40 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
-#ifdef CONFIG_SMP
-
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-       return cpu_rq(cpu)->load.weight;
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       /* Must have done schedule() in kthread() before we set_task_cpu */
+       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+               WARN_ON(1);
+               return;
+       }
+
+       spin_lock_irqsave(&rq->lock, flags);
+       update_rq_clock(rq);
+       set_task_cpu(p, cpu);
+       p->cpus_allowed = cpumask_of_cpu(cpu);
+       p->rt.nr_cpus_allowed = 1;
+       p->flags |= PF_THREAD_BOUND;
+       spin_unlock_irqrestore(&rq->lock, flags);
 }
+EXPORT_SYMBOL(kthread_bind);
 
+#ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
  */
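
kthread_bind() moves into sched.c because it has to adjust per-rq state under rq->lock. The expected calling pattern is unchanged: create the kthread, bind it while it is still stopped, then wake it. A hedged sketch of such a caller (the thread function and name below are invented for illustration, not taken from this patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread_fn(void *data)
{
        /* Idle loop standing in for real per-cpu work. */
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static struct task_struct *start_bound_thread(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create(example_thread_fn, NULL, "example/%u", cpu);
        if (IS_ERR(tsk))
                return tsk;

        /* Still stopped after kthread_create(), so binding is safe. */
        kthread_bind(tsk, cpu);
        wake_up_process(tsk);
        return tsk;
}
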
@@ -1964,7 +2038,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) &&
+       if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
@@ -2005,12 +2079,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
        if (old_cpu != new_cpu) {
                p->se.nr_migrations++;
-               new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
                if (task_hot(p, old_rq->clock, NULL))
                        schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-               perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
        }
        p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2042,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
         * it is sufficient to simply update the task's cpu field.
         */
        if (!p->se.on_rq && !task_running(rq, p)) {
+               update_rq_clock(rq);
                set_task_cpu(p, dest_cpu);
                return 0;
        }
@@ -2226,186 +2300,6 @@ void kick_process(struct task_struct *p)
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return max(rq->cpu_load[type-1], total);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
-       unsigned long min_load = ULONG_MAX, this_load = 0;
-       int load_idx = sd->forkexec_idx;
-       int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
-       do {
-               unsigned long load, avg_load;
-               int local_group;
-               int i;
-
-               /* Skip over this group if it has no CPUs allowed */
-               if (!cpumask_intersects(sched_group_cpus(group),
-                                       &p->cpus_allowed))
-                       continue;
-
-               local_group = cpumask_test_cpu(this_cpu,
-                                              sched_group_cpus(group));
-
-               /* Tally up the load of all CPUs in the group */
-               avg_load = 0;
-
-               for_each_cpu(i, sched_group_cpus(group)) {
-                       /* Bias balancing toward cpus of our domain */
-                       if (local_group)
-                               load = source_load(i, load_idx);
-                       else
-                               load = target_load(i, load_idx);
-
-                       avg_load += load;
-               }
-
-               /* Adjust by relative CPU power of the group */
-               avg_load = sg_div_cpu_power(group,
-                               avg_load * SCHED_LOAD_SCALE);
-
-               if (local_group) {
-                       this_load = avg_load;
-                       this = group;
-               } else if (avg_load < min_load) {
-                       min_load = avg_load;
-                       idlest = group;
-               }
-       } while (group = group->next, group != sd->groups);
-
-       if (!idlest || 100*this_load < imbalance*min_load)
-               return NULL;
-       return idlest;
-}
-
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
-{
-       unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
-       int i;
-
-       /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
-               }
-       }
-
-       return idlest;
-}
-
-/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
- */
-static int sched_balance_self(int cpu, int flag)
-{
-       struct task_struct *t = current;
-       struct sched_domain *tmp, *sd = NULL;
-
-       for_each_domain(cpu, tmp) {
-               /*
-                * If power savings logic is enabled for a domain, stop there.
-                */
-               if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-                       break;
-               if (tmp->flags & flag)
-                       sd = tmp;
-       }
-
-       if (sd)
-               update_shares(sd);
-
-       while (sd) {
-               struct sched_group *group;
-               int new_cpu, weight;
-
-               if (!(sd->flags & flag)) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               group = find_idlest_group(sd, t, cpu);
-               if (!group) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               new_cpu = find_idlest_cpu(group, t, cpu);
-               if (new_cpu == -1 || new_cpu == cpu) {
-                       /* Now try balancing at a lower domain level of cpu */
-                       sd = sd->child;
-                       continue;
-               }
-
-               /* Now try balancing at a lower domain level of new_cpu */
-               cpu = new_cpu;
-               weight = cpumask_weight(sched_domain_span(sd));
-               sd = NULL;
-               for_each_domain(cpu, tmp) {
-                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
-                               break;
-                       if (tmp->flags & flag)
-                               sd = tmp;
-               }
-               /* while loop will break here if sd == NULL */
-       }
-
-       return cpu;
-}
-
 #endif /* CONFIG_SMP */
 
 /**
@@ -2443,37 +2337,22 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+                         int wake_flags)
 {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
-       long old_state;
-       struct rq *rq;
+       struct rq *rq, *orig_rq;
 
        if (!sched_feat(SYNC_WAKEUPS))
-               sync = 0;
-
-#ifdef CONFIG_SMP
-       if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
-               struct sched_domain *sd;
+               wake_flags &= ~WF_SYNC;
 
-               this_cpu = raw_smp_processor_id();
-               cpu = task_cpu(p);
-
-               for_each_domain(this_cpu, sd) {
-                       if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               update_shares(sd);
-                               break;
-                       }
-               }
-       }
-#endif
+       this_cpu = get_cpu();
 
        smp_wmb();
-       rq = task_rq_lock(p, &flags);
+       rq = orig_rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);
-       old_state = p->state;
-       if (!(old_state & state))
+       if (!(p->state & state))
                goto out;
 
        if (p->se.on_rq)
@@ -2481,27 +2360,34 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
        cpu = task_cpu(p);
        orig_cpu = cpu;
-       this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
        if (unlikely(task_running(rq, p)))
                goto out_activate;
 
-       cpu = p->sched_class->select_task_rq(p, sync);
+       /*
+        * In order to handle concurrent wakeups and release the rq->lock
+        * we put the task in TASK_WAKING state.
+        *
+        * First fix up the nr_uninterruptible count:
+        */
+       if (task_contributes_to_load(p))
+               rq->nr_uninterruptible--;
+       p->state = TASK_WAKING;
+       task_rq_unlock(rq, &flags);
+
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (cpu != orig_cpu) {
+               local_irq_save(flags);
+               rq = cpu_rq(cpu);
+               update_rq_clock(rq);
                set_task_cpu(p, cpu);
-               task_rq_unlock(rq, &flags);
-               /* might preempt at this point */
-               rq = task_rq_lock(p, &flags);
-               old_state = p->state;
-               if (!(old_state & state))
-                       goto out;
-               if (p->se.on_rq)
-                       goto out_running;
-
-               this_cpu = smp_processor_id();
-               cpu = task_cpu(p);
+               local_irq_restore(flags);
        }
+       rq = task_rq_lock(p, &flags);
+
+       WARN_ON(p->state != TASK_WAKING);
+       cpu = task_cpu(p);
 
 #ifdef CONFIG_SCHEDSTATS
        schedstat_inc(rq, ttwu_count);
@@ -2521,7 +2407,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 out_activate:
 #endif /* CONFIG_SMP */
        schedstat_inc(p, se.nr_wakeups);
-       if (sync)
+       if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.nr_wakeups_sync);
        if (orig_cpu != cpu)
                schedstat_inc(p, se.nr_wakeups_migrate);
@@ -2550,15 +2436,27 @@ out_activate:
 
 out_running:
        trace_sched_wakeup(rq, p, success);
-       check_preempt_curr(rq, p, sync);
+       check_preempt_curr(rq, p, wake_flags);
 
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
                p->sched_class->task_wake_up(rq, p);
+
+       if (unlikely(rq->idle_stamp)) {
+               u64 delta = rq->clock - rq->idle_stamp;
+               u64 max = 2*sysctl_sched_migration_cost;
+
+               if (delta > max)
+                       rq->avg_idle = max;
+               else
+                       update_avg(&rq->avg_idle, delta);
+               rq->idle_stamp = 0;
+       }
 #endif
 out:
        task_rq_unlock(rq, &flags);
+       put_cpu();
 
        return success;
 }
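
The new rq->idle_stamp / rq->avg_idle pair feeds the newidle-balance cutoff further down: idle_balance() records the time the CPU went idle, and the wakeup path above folds the observed idle period into avg_idle, clamping it at 2*sysctl_sched_migration_cost. Assuming update_avg() keeps its existing 1/8-weight moving average, an rq with avg_idle of 1,000,000ns that sees a 200,000ns idle spell moves to 1,000,000 + (200,000 - 1,000,000)/8 = 900,000ns. Once avg_idle drops below sysctl_sched_migration_cost, idle_balance() (see its hunk below) bails out early, on the expectation that the CPU will not stay idle long enough to amortize pulling a task over.
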
@@ -2601,6 +2499,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
        p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
+       p->se.avg_running               = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                        = 0;
@@ -2659,31 +2558,22 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
        int cpu = get_cpu();
+       unsigned long flags;
 
        __sched_fork(p);
 
-#ifdef CONFIG_SMP
-       cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-       set_task_cpu(p, cpu);
-
-       /*
-        * Make sure we do not leak PI boosting priority to the child.
-        */
-       p->prio = current->normal_prio;
-
        /*
         * Revert to default priority/policy on fork if requested.
         */
        if (unlikely(p->sched_reset_on_fork)) {
-               if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+               if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
                        p->policy = SCHED_NORMAL;
-
-               if (p->normal_prio < DEFAULT_PRIO)
-                       p->prio = DEFAULT_PRIO;
+                       p->normal_prio = p->static_prio;
+               }
 
                if (PRIO_TO_NICE(p->static_prio) < 0) {
                        p->static_prio = NICE_TO_PRIO(0);
+                       p->normal_prio = p->static_prio;
                        set_load_weight(p);
                }
 
@@ -2694,9 +2584,22 @@ void sched_fork(struct task_struct *p, int clone_flags)
                p->sched_reset_on_fork = 0;
        }
 
+       /*
+        * Make sure we do not leak PI boosting priority to the child.
+        */
+       p->prio = current->normal_prio;
+
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
+#ifdef CONFIG_SMP
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
+       local_irq_save(flags);
+       update_rq_clock(cpu_rq(cpu));
+       set_task_cpu(p, cpu);
+       local_irq_restore(flags);
+
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -2729,8 +2632,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
        BUG_ON(p->state != TASK_RUNNING);
        update_rq_clock(rq);
 
-       p->prio = effective_prio(p);
-
        if (!p->sched_class->task_new || !current->se.on_rq) {
                activate_task(rq, p, 0);
        } else {
@@ -2742,7 +2643,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                inc_nr_running(rq);
        }
        trace_sched_wakeup_new(rq, p, 1);
-       check_preempt_curr(rq, p, 0);
+       check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
                p->sched_class->task_wake_up(rq, p);
@@ -2866,7 +2767,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
-       perf_counter_task_sched_in(current, cpu_of(rq));
+       perf_event_task_sched_in(current, cpu_of(rq));
        finish_lock_switch(rq, prev);
 
        fire_sched_in_preempt_notifiers(current);
@@ -2964,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
         */
        arch_start_context_switch(prev);
 
-       if (unlikely(!mm)) {
+       if (likely(!mm)) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (unlikely(!prev->mm)) {
+       if (likely(!prev->mm)) {
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
@@ -3052,6 +2953,19 @@ unsigned long nr_iowait(void)
        return sum;
 }
 
+unsigned long nr_iowait_cpu(void)
+{
+       struct rq *this = this_rq();
+       return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+       struct rq *this = this_rq();
+       return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
@@ -3121,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-       return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
@@ -3251,7 +3156,7 @@ out:
 void sched_exec(void)
 {
        int new_cpu, this_cpu = get_cpu();
-       new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
        put_cpu();
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
@@ -3632,7 +3537,7 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
         * capacity but still has some space to pick up some load
         * from other group and save more power
         */
-       if (sgs->sum_nr_running > sgs->group_capacity - 1)
+       if (sgs->sum_nr_running + 1 > sgs->group_capacity)
                return;
 
        if (sgs->sum_nr_running > sds->leader_nr_running ||
@@ -3671,11 +3576,6 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
        *imbalance = sds->min_load_per_task;
        sds->busiest = sds->group_min;
 
-       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-                       group_first_cpu(sds->group_leader);
-       }
-
        return 1;
 
 }
@@ -3700,8 +3600,105 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
 
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+       return SCHED_LOAD_SCALE;
+}
+
+unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+       return default_scale_freq_power(sd, cpu);
+}
+
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long smt_gain = sd->smt_gain;
+
+       smt_gain /= weight;
+
+       return smt_gain;
+}
+
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+       return default_scale_smt_power(sd, cpu);
+}
+
+unsigned long scale_rt_power(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       u64 total, available;
+
+       sched_avg_update(rq);
+
+       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       available = total - rq->rt_avg;
+
+       if (unlikely((s64)total < SCHED_LOAD_SCALE))
+               total = SCHED_LOAD_SCALE;
+
+       total >>= SCHED_LOAD_SHIFT;
+
+       return div_u64(available, total);
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long power = SCHED_LOAD_SCALE;
+       struct sched_group *sdg = sd->groups;
+
+       if (sched_feat(ARCH_POWER))
+               power *= arch_scale_freq_power(sd, cpu);
+       else
+               power *= default_scale_freq_power(sd, cpu);
+
+       power >>= SCHED_LOAD_SHIFT;
+
+       if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+               if (sched_feat(ARCH_POWER))
+                       power *= arch_scale_smt_power(sd, cpu);
+               else
+                       power *= default_scale_smt_power(sd, cpu);
+
+               power >>= SCHED_LOAD_SHIFT;
+       }
+
+       power *= scale_rt_power(cpu);
+       power >>= SCHED_LOAD_SHIFT;
+
+       if (!power)
+               power = 1;
+
+       sdg->cpu_power = power;
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
+{
+       struct sched_domain *child = sd->child;
+       struct sched_group *group, *sdg = sd->groups;
+       unsigned long power;
+
+       if (!child) {
+               update_cpu_power(sd, cpu);
+               return;
+       }
+
+       power = 0;
+
+       group = child->groups;
+       do {
+               power += group->cpu_power;
+               group = group->next;
+       } while (group != child->groups);
+
+       sdg->cpu_power = power;
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
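
update_cpu_power() replaces the old __cpu_power / reciprocal-divide scheme with a plain multiplicative scale: starting from SCHED_LOAD_SCALE (1024), the value is scaled by the arch or default frequency factor, by the SMT factor on SD_SHARE_CPUPOWER domains, and finally by scale_rt_power(). As a rough worked example with assumed numbers: on a 2-thread SMT sibling domain with an smt_gain of, say, 1178, default_scale_smt_power() yields 1178/2 = 589; if roughly a quarter of the recent averaging window went to RT tasks, scale_rt_power() returns about three quarters of 1024, i.e. 768, so cpu_power ends up at 589 * 768 >> 10 ≈ 441 instead of the flat 1024. update_group_power() then simply sums the child groups' cpu_power values at higher domain levels.
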
@@ -3712,7 +3709,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+                       struct sched_group *group, int this_cpu,
                        enum cpu_idle_type idle, int load_idx, int *sd_idle,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
@@ -3723,8 +3721,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
        unsigned long sum_avg_load_per_task;
        unsigned long avg_load_per_task;
 
-       if (local_group)
+       if (local_group) {
                balance_cpu = group_first_cpu(group);
+               if (balance_cpu == this_cpu)
+                       update_group_power(sd, this_cpu);
+       }
 
        /* Tally up the load of all CPUs in the group */
        sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3773,8 +3774,7 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
        }
 
        /* Adjust by relative CPU power of the group */
-       sgs->avg_load = sg_div_cpu_power(group,
-                       sgs->group_load * SCHED_LOAD_SCALE);
+       sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
 
        /*
@@ -3786,14 +3786,14 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
         *      normalized nr_running number somewhere that negates
         *      the hierarchy?
         */
-       avg_load_per_task = sg_div_cpu_power(group,
-                       sum_avg_load_per_task * SCHED_LOAD_SCALE);
+       avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
+               group->cpu_power;
 
        if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
                sgs->group_imb = 1;
 
-       sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
+       sgs->group_capacity =
+               DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 }
 
 /**
@@ -3811,9 +3811,13 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        const struct cpumask *cpus, int *balance,
                        struct sd_lb_stats *sds)
 {
+       struct sched_domain *child = sd->child;
        struct sched_group *group = sd->groups;
        struct sg_lb_stats sgs;
-       int load_idx;
+       int load_idx, prefer_sibling = 0;
+
+       if (child && child->flags & SD_PREFER_SIBLING)
+               prefer_sibling = 1;
 
        init_sd_power_savings_stats(sd, sds, idle);
        load_idx = get_sd_load_idx(sd, idle);
@@ -3824,14 +3828,22 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                local_group = cpumask_test_cpu(this_cpu,
                                               sched_group_cpus(group));
                memset(&sgs, 0, sizeof(sgs));
-               update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+               update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
                                local_group, cpus, balance, &sgs);
 
                if (local_group && balance && !(*balance))
                        return;
 
                sds->total_load += sgs.group_load;
-               sds->total_pwr += group->__cpu_power;
+               sds->total_pwr += group->cpu_power;
+
+               /*
+                * In case the child domain prefers tasks go to siblings
+                * first, lower the group capacity to one so that we'll try
+                * and move all the excess tasks away.
+                */
+               if (prefer_sibling)
+                       sgs.group_capacity = min(sgs.group_capacity, 1UL);
 
                if (local_group) {
                        sds->this_load = sgs.avg_load;
@@ -3851,7 +3863,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                update_sd_power_savings_stats(group, sds, local_group, &sgs);
                group = group->next;
        } while (group != sd->groups);
-
 }
 
 /**
@@ -3889,28 +3900,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         * moving them.
         */
 
-       pwr_now += sds->busiest->__cpu_power *
+       pwr_now += sds->busiest->cpu_power *
                        min(sds->busiest_load_per_task, sds->max_load);
-       pwr_now += sds->this->__cpu_power *
+       pwr_now += sds->this->cpu_power *
                        min(sds->this_load_per_task, sds->this_load);
        pwr_now /= SCHED_LOAD_SCALE;
 
        /* Amount of load we'd subtract */
-       tmp = sg_div_cpu_power(sds->busiest,
-                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+       tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+               sds->busiest->cpu_power;
        if (sds->max_load > tmp)
-               pwr_move += sds->busiest->__cpu_power *
+               pwr_move += sds->busiest->cpu_power *
                        min(sds->busiest_load_per_task, sds->max_load - tmp);
 
        /* Amount of load we'd add */
-       if (sds->max_load * sds->busiest->__cpu_power <
+       if (sds->max_load * sds->busiest->cpu_power <
                sds->busiest_load_per_task * SCHED_LOAD_SCALE)
-               tmp = sg_div_cpu_power(sds->this,
-                       sds->max_load * sds->busiest->__cpu_power);
+               tmp = (sds->max_load * sds->busiest->cpu_power) /
+                       sds->this->cpu_power;
        else
-               tmp = sg_div_cpu_power(sds->this,
-                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
-       pwr_move += sds->this->__cpu_power *
+               tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+                       sds->this->cpu_power;
+       pwr_move += sds->this->cpu_power *
                        min(sds->this_load_per_task, sds->this_load + tmp);
        pwr_move /= SCHED_LOAD_SCALE;
 
@@ -3945,8 +3956,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                        sds->max_load - sds->busiest_load_per_task);
 
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds->busiest->__cpu_power,
-               (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+       *imbalance = min(max_pull * sds->busiest->cpu_power,
+               (sds->avg_load - sds->this_load) * sds->this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
        /*
@@ -4076,15 +4087,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
        int i;
 
        for_each_cpu(i, sched_group_cpus(group)) {
+               unsigned long power = power_of(i);
+               unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
                unsigned long wl;
 
                if (!cpumask_test_cpu(i, cpus))
                        continue;
 
                rq = cpu_rq(i);
-               wl = weighted_cpuload(i);
+               wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+               wl /= power;
 
-               if (rq->nr_running == 1 && wl > imbalance)
+               if (capacity && rq->nr_running == 1 && wl > imbalance)
                        continue;
 
                if (wl > max_load) {
@@ -4120,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        unsigned long flags;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_setall(cpus);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4283,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        int all_pinned = 0;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_setall(cpus);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4423,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
        int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;
 
+       this_rq->idle_stamp = this_rq->clock;
+
+       if (this_rq->avg_idle < sysctl_sched_migration_cost)
+               return;
+
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
 
@@ -4437,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
-               if (pulled_task)
+               if (pulled_task) {
+                       this_rq->idle_stamp = 0;
                        break;
+               }
        }
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -4673,7 +4694,7 @@ int select_nohz_load_balancer(int stop_tick)
                cpumask_set_cpu(cpu, nohz.cpu_mask);
 
                /* time for ilb owner also to sleep */
-               if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+               if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
                                atomic_set(&nohz.load_balancer, -1);
                        return 0;
@@ -5040,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
        p->gtime = cputime_add(p->gtime, cputime);
 
        /* Add guest time to cpustat. */
-       cpustat->user = cputime64_add(cpustat->user, tmp);
-       cpustat->guest = cputime64_add(cpustat->guest, tmp);
+       if (TASK_NICE(p) > 0) {
+               cpustat->nice = cputime64_add(cpustat->nice, tmp);
+               cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+       } else {
+               cpustat->user = cputime64_add(cpustat->user, tmp);
+               cpustat->guest = cputime64_add(cpustat->guest, tmp);
+       }
 }
 
 /*
@@ -5119,17 +5145,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-       cputime_t one_jiffy = jiffies_to_cputime(1);
-       cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
        if (user_tick)
-               account_user_time(p, one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-               account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+               account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
-               account_idle_time(one_jiffy);
+               account_idle_time(cputime_one_jiffy);
 }
 
 /*
@@ -5157,60 +5182,86 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       return p->utime;
+       *ut = p->utime;
+       *st = p->stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       return p->stime;
+       struct task_cputime cputime;
+
+       thread_group_cputime(p, &cputime);
+
+       *ut = cputime.utime;
+       *st = cputime.stime;
 }
 #else
-cputime_t task_utime(struct task_struct *p)
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs)     nsecs_to_jiffies(__nsecs)
+#endif
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       clock_t utime = cputime_to_clock_t(p->utime),
-               total = utime + cputime_to_clock_t(p->stime);
-       u64 temp;
+       cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
        /*
         * Use CFS's precise accounting:
         */
-       temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+       rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
        if (total) {
-               temp *= utime;
+               u64 temp;
+
+               temp = (u64)(rtime * utime);
                do_div(temp, total);
-       }
-       utime = (clock_t)temp;
+               utime = (cputime_t)temp;
+       } else
+               utime = rtime;
+
+       /*
+        * Compare with previous values, to keep monotonicity:
+        */
+       p->prev_utime = max(p->prev_utime, utime);
+       p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-       return p->prev_utime;
+       *ut = p->prev_utime;
+       *st = p->prev_stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       clock_t stime;
+       struct signal_struct *sig = p->signal;
+       struct task_cputime cputime;
+       cputime_t rtime, utime, total;
 
-       /*
-        * Use CFS's precise accounting. (we subtract utime from
-        * the total, to make sure the total observed by userspace
-        * grows monotonically - apps rely on that):
-        */
-       stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-                       cputime_to_clock_t(task_utime(p));
+       thread_group_cputime(p, &cputime);
 
-       if (stime >= 0)
-               p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+       total = cputime_add(cputime.utime, cputime.stime);
+       rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-       return p->prev_stime;
-}
-#endif
+       if (total) {
+               u64 temp;
 
-inline cputime_t task_gtime(struct task_struct *p)
-{
-       return p->gtime;
+               temp = (u64)(rtime * cputime.utime);
+               do_div(temp, total);
+               utime = (cputime_t)temp;
+       } else
+               utime = rtime;
+
+       sig->prev_utime = max(sig->prev_utime, utime);
+       sig->prev_stime = max(sig->prev_stime,
+                             cputime_sub(rtime, sig->prev_utime));
+
+       *ut = sig->prev_utime;
+       *st = sig->prev_stime;
 }
+#endif
 
 /*
  * This function gets called by the timer code, with HZ frequency.
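
The rewritten task_times() splits the precise CFS runtime (rtime = sum_exec_runtime) between user and system time in the same ratio as the tick-sampled utime/stime, and keeps both monotonic via prev_utime/prev_stime. For example, assuming cputime values in jiffies for simplicity: with sampled utime = 60, stime = 40 and rtime = 120, the scaled user time is 120 * 60 / 100 = 72, so *ut becomes max(prev_utime, 72) and *st becomes max(prev_stime, 120 - 72) = 48; the two always add up to at least rtime, and neither value can go backwards between calls. thread_group_times() applies the same arithmetic to the summed group statistics, and must be called with siglock held.
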
@@ -5233,7 +5284,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        spin_unlock(&rq->lock);
 
-       perf_counter_task_tick(curr, cpu);
+       perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@ -5345,14 +5396,13 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
 {
-       if (prev->state == TASK_RUNNING) {
-               u64 runtime = prev->se.sum_exec_runtime;
+       u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
 
-               runtime -= prev->se.prev_sum_exec_runtime;
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+       update_avg(&p->se.avg_running, runtime);
 
+       if (p->state == TASK_RUNNING) {
                /*
                 * In order to avoid avg_overlap growing stale when we are
                 * indeed overlapping and hence not getting put to sleep, grow
@@ -5362,9 +5412,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
                 * correlates to the amount of cache footprint a task can
                 * build up.
                 */
-               update_avg(&prev->se.avg_overlap, runtime);
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+               update_avg(&p->se.avg_overlap, runtime);
+       } else {
+               update_avg(&p->se.avg_running, 0);
        }
-       prev->sched_class->put_prev_task(rq, prev);
+       p->sched_class->put_prev_task(rq, p);
 }
 
 /*
@@ -5413,7 +5466,7 @@ need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_qsctr_inc(cpu);
+       rcu_sched_qs(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;
 
@@ -5447,7 +5500,7 @@ need_resched_nonpreemptible:
 
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
-               perf_counter_task_sched_out(prev, next, cpu);
+               perf_event_task_sched_out(prev, next, cpu);
 
                rq->nr_switches++;
                rq->curr = next;
@@ -5474,7 +5527,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -5596,10 +5649,10 @@ asmlinkage void __sched preempt_schedule_irq(void)
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                          void *key)
 {
-       return try_to_wake_up(curr->private, mode, sync);
+       return try_to_wake_up(curr->private, mode, wake_flags);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -5613,14 +5666,14 @@ EXPORT_SYMBOL(default_wake_function);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, int sync, void *key)
+                       int nr_exclusive, int wake_flags, void *key)
 {
        wait_queue_t *curr, *next;
 
        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;
 
-               if (curr->func(curr, mode, sync, key) &&
+               if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
@@ -5681,16 +5734,16 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
 {
        unsigned long flags;
-       int sync = 1;
+       int wake_flags = WF_SYNC;
 
        if (unlikely(!q))
                return;
 
        if (unlikely(!nr_exclusive))
-               sync = 0;
+               wake_flags = 0;
 
        spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, sync, key);
+       __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
@@ -6168,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
        BUG_ON(p->se.on_rq);
 
        p->policy = policy;
-       switch (p->policy) {
-       case SCHED_NORMAL:
-       case SCHED_BATCH:
-       case SCHED_IDLE:
-               p->sched_class = &fair_sched_class;
-               break;
-       case SCHED_FIFO:
-       case SCHED_RR:
-               p->sched_class = &rt_sched_class;
-               break;
-       }
-
        p->rt_priority = prio;
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
+       if (rt_prio(p->prio))
+               p->sched_class = &rt_sched_class;
+       else
+               p->sched_class = &fair_sched_class;
        set_load_weight(p);
 }
 
@@ -6586,6 +6631,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
        struct task_struct *p;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
 
        get_online_cpus();
@@ -6600,7 +6647,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        if (retval)
                goto out_unlock;
 
+       rq = task_rq_lock(p, &flags);
        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+       task_rq_unlock(rq, &flags);
 
 out_unlock:
        read_unlock(&tasklist_lock);
@@ -6701,6 +6750,8 @@ int __cond_resched_lock(spinlock_t *lock)
        int resched = should_resched();
        int ret = 0;
 
+       lockdep_assert_held(lock);
+
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
@@ -6744,9 +6795,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -6754,7 +6802,9 @@ void __sched io_schedule(void)
 
        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
+       current->in_iowait = 1;
        schedule();
+       current->in_iowait = 0;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
 }
@@ -6767,7 +6817,9 @@ long __sched io_schedule_timeout(long timeout)
 
        delayacct_blkio_start();
        atomic_inc(&rq->nr_iowait);
+       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
+       current->in_iowait = 0;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
        return ret;
@@ -6835,6 +6887,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
        struct task_struct *p;
        unsigned int time_slice;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
        struct timespec t;
 
@@ -6851,23 +6905,10 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;
 
-       /*
-        * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-        * tasks that are on an otherwise idle runqueue:
-        */
-       time_slice = 0;
-       if (p->policy == SCHED_RR) {
-               time_slice = DEF_TIMESLICE;
-       } else if (p->policy != SCHED_FIFO) {
-               struct sched_entity *se = &p->se;
-               unsigned long flags;
-               struct rq *rq;
+       rq = task_rq_lock(p, &flags);
+       time_slice = p->sched_class->get_rr_interval(rq, p);
+       task_rq_unlock(rq, &flags);
 
-               rq = task_rq_lock(p, &flags);
-               if (rq->cfs.load.weight)
-                       time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-               task_rq_unlock(rq, &flags);
-       }
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -6940,7 +6981,7 @@ void show_state_filter(unsigned long state_filter)
        /*
         * Only show locks if all tasks are dumped:
         */
-       if (state_filter == -1)
+       if (!state_filter)
                debug_show_all_locks();
 }
 
@@ -7060,7 +7101,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        int ret = 0;
 
        rq = task_rq_lock(p, &flags);
-       if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }
@@ -7082,7 +7123,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+       if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
                /* Need help from migration thread: drop lock and wait. */
                struct task_struct *mt = rq->migration_thread;
 
@@ -7147,6 +7188,11 @@ fail:
        return ret;
 }
 
+#define RCU_MIGRATION_IDLE     0
+#define RCU_MIGRATION_NEED_QS  1
+#define RCU_MIGRATION_GOT_QS   2
+#define RCU_MIGRATION_MUST_SYNC        3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7154,6 +7200,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+       int badcpu;
        int cpu = (long)data;
        struct rq *rq;
 
@@ -7188,8 +7235,17 @@ static int migration_thread(void *data)
                req = list_entry(head->next, struct migration_req, list);
                list_del_init(head->next);
 
-               spin_unlock(&rq->lock);
-               __migrate_task(req->task, cpu, req->dest_cpu);
+               if (req->task != NULL) {
+                       spin_unlock(&rq->lock);
+                       __migrate_task(req->task, cpu, req->dest_cpu);
+               } else if (likely(cpu == (badcpu = smp_processor_id()))) {
+                       req->dest_cpu = RCU_MIGRATION_GOT_QS;
+                       spin_unlock(&rq->lock);
+               } else {
+                       req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+                       spin_unlock(&rq->lock);
+                       WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+               }
                local_irq_enable();
 
                complete(&req->done);
@@ -7221,19 +7277,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
        /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                        goto move;
 
        /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
        if (dest_cpu < nr_cpu_ids)
                goto move;
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu >= nr_cpu_ids) {
                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
                /*
                 * Don't tell them about moving exiting tasks or
@@ -7262,7 +7318,7 @@ move:
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
        unsigned long flags;
 
        local_irq_save(flags);
@@ -7516,7 +7572,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-       int i, cpu_num = num_online_cpus();
+       int i, cpu_num = num_possible_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
@@ -7526,7 +7582,7 @@ static void register_sched_domain_sysctl(void)
        if (entry == NULL)
                return;
 
-       for_each_online_cpu(i) {
+       for_each_possible_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
@@ -7703,7 +7759,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
        .notifier_call = migration_call,
@@ -7730,6 +7786,16 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
+static __read_mostly int sched_domain_debug_enabled;
+
+static int __init sched_domain_debug_setup(char *str)
+{
+       sched_domain_debug_enabled = 1;
+
+       return 0;
+}
+early_param("sched_debug", sched_domain_debug_setup);
+
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
 {
@@ -7768,7 +7834,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->__cpu_power) {
+               if (!group->cpu_power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -7792,9 +7858,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
                printk(KERN_CONT " %s", str);
-               if (group->__cpu_power != SCHED_LOAD_SCALE) {
-                       printk(KERN_CONT " (__cpu_power = %d)",
-                               group->__cpu_power);
+               if (group->cpu_power != SCHED_LOAD_SCALE) {
+                       printk(KERN_CONT " (cpu_power = %d)",
+                               group->cpu_power);
                }
 
                group = group->next;
@@ -7816,6 +7882,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
        cpumask_var_t groupmask;
        int level = 0;
 
+       if (!sched_domain_debug_enabled)
+               return;
+
        if (!sd) {
                printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
                return;
@@ -7859,9 +7928,7 @@ static int sd_degenerate(struct sched_domain *sd)
        }
 
        /* Following flags don't use groups */
-       if (sd->flags & (SD_WAKE_IDLE |
-                        SD_WAKE_AFFINE |
-                        SD_WAKE_BALANCE))
+       if (sd->flags & (SD_WAKE_AFFINE))
                return 0;
 
        return 1;
@@ -7878,10 +7945,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
                return 0;
 
-       /* Does parent contain flags not in child? */
-       /* WAKE_BALANCE is a subset of WAKE_AFFINE */
-       if (cflags & SD_WAKE_AFFINE)
-               pflags &= ~SD_WAKE_BALANCE;
        /* Flags needing groups don't count if only 1 group in parent */
        if (parent->groups == parent->groups->next) {
                pflags &= ~(SD_LOAD_BALANCE |
@@ -7901,6 +7964,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 static void free_rootdomain(struct root_domain *rd)
 {
+       synchronize_sched();
+
        cpupri_cleanup(&rd->cpupri);
 
        free_cpumask_var(rd->rto_mask);
@@ -8041,6 +8106,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+       alloc_bootmem_cpumask_var(&cpu_isolated_map);
        cpulist_parse(str, cpu_isolated_map);
        return 1;
 }
@@ -8079,7 +8145,7 @@ init_sched_build_groups(const struct cpumask *span,
                        continue;
 
                cpumask_clear(sched_group_cpus(sg));
-               sg->__cpu_power = 0;
+               sg->cpu_power = 0;
 
                for_each_cpu(j, span) {
                        if (group_fn(j, cpu_map, NULL, tmpmask) != group)
@@ -8187,6 +8253,39 @@ struct static_sched_domain {
        DECLARE_BITMAP(span, CONFIG_NR_CPUS);
 };
 
+struct s_data {
+#ifdef CONFIG_NUMA
+       int                     sd_allnodes;
+       cpumask_var_t           domainspan;
+       cpumask_var_t           covered;
+       cpumask_var_t           notcovered;
+#endif
+       cpumask_var_t           nodemask;
+       cpumask_var_t           this_sibling_map;
+       cpumask_var_t           this_core_map;
+       cpumask_var_t           send_covered;
+       cpumask_var_t           tmpmask;
+       struct sched_group      **sched_group_nodes;
+       struct root_domain      *rd;
+};
+
+enum s_alloc {
+       sa_sched_groups = 0,
+       sa_rootdomain,
+       sa_tmpmask,
+       sa_send_covered,
+       sa_this_core_map,
+       sa_this_sibling_map,
+       sa_nodemask,
+       sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+       sa_notcovered,
+       sa_covered,
+       sa_domainspan,
+#endif
+       sa_none,
+};
+
 /*
  * SMT sched-domains:
  */
@@ -8304,11 +8403,76 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
                                continue;
                        }
 
-                       sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+                       sg->cpu_power += sd->groups->cpu_power;
                }
                sg = sg->next;
        } while (sg != group_head);
 }
+
+static int build_numa_sched_groups(struct s_data *d,
+                                  const struct cpumask *cpu_map, int num)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg, *prev;
+       int n, j;
+
+       cpumask_clear(d->covered);
+       cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+       if (cpumask_empty(d->nodemask)) {
+               d->sched_group_nodes[num] = NULL;
+               goto out;
+       }
+
+       sched_domain_node_span(num, d->domainspan);
+       cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+       sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                         GFP_KERNEL, num);
+       if (!sg) {
+               printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+                      num);
+               return -ENOMEM;
+       }
+       d->sched_group_nodes[num] = sg;
+
+       for_each_cpu(j, d->nodemask) {
+               sd = &per_cpu(node_domains, j).sd;
+               sd->groups = sg;
+       }
+
+       sg->cpu_power = 0;
+       cpumask_copy(sched_group_cpus(sg), d->nodemask);
+       sg->next = sg;
+       cpumask_or(d->covered, d->covered, d->nodemask);
+
+       prev = sg;
+       for (j = 0; j < nr_node_ids; j++) {
+               n = (num + j) % nr_node_ids;
+               cpumask_complement(d->notcovered, d->covered);
+               cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+               cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+               if (cpumask_empty(d->tmpmask))
+                       break;
+               cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+               if (cpumask_empty(d->tmpmask))
+                       continue;
+               sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                                 GFP_KERNEL, num);
+               if (!sg) {
+                       printk(KERN_WARNING
+                              "Can not alloc domain group for node %d\n", j);
+                       return -ENOMEM;
+               }
+               sg->cpu_power = 0;
+               cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+               sg->next = prev->next;
+               cpumask_or(d->covered, d->covered, d->tmpmask);
+               prev->next = sg;
+               prev = sg;
+       }
+out:
+       return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
@@ -8362,15 +8526,13 @@ static void free_sched_groups(const struct cpumask *cpu_map,
  * there are asymmetries in the topology. If there are asymmetries, group
  * having more cpu_power will pick up more load compared to the group having
  * less cpu_power.
- *
- * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
- * the maximum number of tasks a group can handle in the presence of other idle
- * or lightly loaded groups in the same sched domain.
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
        struct sched_domain *child;
        struct sched_group *group;
+       long power;
+       int weight;
 
        WARN_ON(!sd || !sd->groups);
 
@@ -8379,28 +8541,32 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
        child = sd->child;
 
-       sd->groups->__cpu_power = 0;
+       sd->groups->cpu_power = 0;
 
-       /*
-        * For perf policy, if the groups in child domain share resources
-        * (for example cores sharing some portions of the cache hierarchy
-        * or SMT), then set this domain groups cpu_power such that each group
-        * can handle only one task, when there are other idle groups in the
-        * same sched domain.
-        */
-       if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
-                      (child->flags &
-                       (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
-               sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
+       if (!child) {
+               power = SCHED_LOAD_SCALE;
+               weight = cpumask_weight(sched_domain_span(sd));
+               /*
+                * SMT siblings share the power of a single core.
+                * Usually multiple threads get a better yield out of
+                * that one core than a single thread would have,
+                * reflect that in sd->smt_gain.
+                */
+               if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+                       power *= sd->smt_gain;
+                       power /= weight;
+                       power >>= SCHED_LOAD_SHIFT;
+               }
+               sd->groups->cpu_power += power;
                return;
        }
 
        /*
-        * add cpu_power of each child group to this groups cpu_power
+        * Add cpu_power of each child group to this group's cpu_power.
         */
        group = child->groups;
        do {
-               sg_inc_cpu_power(sd->groups, group->__cpu_power);
+               sd->groups->cpu_power += group->cpu_power;
                group = group->next;
        } while (group != child->groups);
 }
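
To make the new !child branch concrete, here is a worked example of the arithmetic above, using SCHED_LOAD_SCALE = 1024 and SCHED_LOAD_SHIFT = 10 as in this kernel; an smt_gain of 1178 (roughly +15%) is assumed purely for illustration:

/*
 * Two SMT siblings sharing one core, SD_SHARE_CPUPOWER set:
 *
 *   power  = SCHED_LOAD_SCALE              = 1024
 *   power *= smt_gain                      = 1024 * 1178 = 1206272
 *   power /= weight (2 siblings)           = 603136
 *   power >>= SCHED_LOAD_SHIFT             = 589
 *
 * Each sibling-level group therefore advertises cpu_power 589; the two
 * siblings together account for 1178, slightly more than one full core
 * (1024), while each sibling alone counts as clearly less than a core.
 */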
@@ -8467,287 +8633,292 @@ static void set_domain_attribute(struct sched_domain *sd,
                request = attr->relax_domain_level;
        if (request < sd->level) {
                /* turn off idle balance on this domain */
-               sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+               sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
        } else {
                /* turn on idle balance on this domain */
-               sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+               sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+       }
+}
+
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+                                const struct cpumask *cpu_map)
+{
+       switch (what) {
+       case sa_sched_groups:
+               free_sched_groups(cpu_map, d->tmpmask);
+               d->sched_group_nodes = NULL; /* fall through */
+       case sa_rootdomain:
+               free_rootdomain(d->rd); /* fall through */
+       case sa_tmpmask:
+               free_cpumask_var(d->tmpmask); /* fall through */
+       case sa_send_covered:
+               free_cpumask_var(d->send_covered); /* fall through */
+       case sa_this_core_map:
+               free_cpumask_var(d->this_core_map); /* fall through */
+       case sa_this_sibling_map:
+               free_cpumask_var(d->this_sibling_map); /* fall through */
+       case sa_nodemask:
+               free_cpumask_var(d->nodemask); /* fall through */
+       case sa_sched_group_nodes:
+#ifdef CONFIG_NUMA
+               kfree(d->sched_group_nodes); /* fall through */
+       case sa_notcovered:
+               free_cpumask_var(d->notcovered); /* fall through */
+       case sa_covered:
+               free_cpumask_var(d->covered); /* fall through */
+       case sa_domainspan:
+               free_cpumask_var(d->domainspan); /* fall through */
+#endif
+       case sa_none:
+               break;
        }
 }
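
The enum s_alloc stages together with the intentionally falling-through switch above replace the old ladder of goto labels: the caller records how far allocation got, and __free_domain_allocs() unwinds from that stage downwards. A minimal user-space analog of the same pattern (names and buffers are purely illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Stages ordered from "most allocated" down to "nothing", like s_alloc. */
enum stage { got_all = 0, got_b, got_a, got_none };

struct bufs { void *a, *b, *c; };

static void unwind(struct bufs *d, enum stage how_far)
{
	switch (how_far) {
	case got_all:
		free(d->c); /* fall through */
	case got_b:
		free(d->b); /* fall through */
	case got_a:
		free(d->a); /* fall through */
	case got_none:
		break;
	}
}

static enum stage setup(struct bufs *d)
{
	if (!(d->a = malloc(16)))
		return got_none;
	if (!(d->b = malloc(16)))
		return got_a;
	if (!(d->c = malloc(16)))
		return got_b;
	return got_all;
}

int main(void)
{
	struct bufs d = { NULL, NULL, NULL };
	enum stage s = setup(&d);

	if (s != got_all)
		printf("partial allocation, unwinding from stage %d\n", s);
	unwind(&d, s);	/* frees exactly what setup() managed to allocate */
	return 0;
}

The win, as in __build_sched_domains() further down, is that both the error path and the success path funnel through one cleanup function and simply pass a different starting stage.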
 
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-static int __build_sched_domains(const struct cpumask *cpu_map,
-                                struct sched_domain_attr *attr)
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+                                                  const struct cpumask *cpu_map)
 {
-       int i, err = -ENOMEM;
-       struct root_domain *rd;
-       cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
-               tmpmask;
 #ifdef CONFIG_NUMA
-       cpumask_var_t domainspan, covered, notcovered;
-       struct sched_group **sched_group_nodes = NULL;
-       int sd_allnodes = 0;
-
-       if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
-               goto out;
-       if (!alloc_cpumask_var(&covered, GFP_KERNEL))
-               goto free_domainspan;
-       if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
-               goto free_covered;
-#endif
-
-       if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
-               goto free_notcovered;
-       if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
-               goto free_nodemask;
-       if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
-               goto free_this_sibling_map;
-       if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
-               goto free_this_core_map;
-       if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
-               goto free_send_covered;
-
-#ifdef CONFIG_NUMA
-       /*
-        * Allocate the per-node list of sched groups
-        */
-       sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
-                                   GFP_KERNEL);
-       if (!sched_group_nodes) {
+       if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+               return sa_none;
+       if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+               return sa_domainspan;
+       if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+               return sa_covered;
+       /* Allocate the per-node list of sched groups */
+       d->sched_group_nodes = kcalloc(nr_node_ids,
+                                     sizeof(struct sched_group *), GFP_KERNEL);
+       if (!d->sched_group_nodes) {
                printk(KERN_WARNING "Can not alloc sched group node list\n");
-               goto free_tmpmask;
-       }
-#endif
-
-       rd = alloc_rootdomain();
-       if (!rd) {
+               return sa_notcovered;
+       }
+       sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+       if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+               return sa_sched_group_nodes;
+       if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+               return sa_nodemask;
+       if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+               return sa_this_sibling_map;
+       if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+               return sa_this_core_map;
+       if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+               return sa_send_covered;
+       d->rd = alloc_rootdomain();
+       if (!d->rd) {
                printk(KERN_WARNING "Cannot alloc root domain\n");
-               goto free_sched_groups;
+               return sa_tmpmask;
        }
+       return sa_rootdomain;
+}
 
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+       struct sched_domain *sd = NULL;
 #ifdef CONFIG_NUMA
-       sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
-#endif
-
-       /*
-        * Set up domains for cpus specified by the cpu_map.
-        */
-       for_each_cpu(i, cpu_map) {
-               struct sched_domain *sd = NULL, *p;
-
-               cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
-
-#ifdef CONFIG_NUMA
-               if (cpumask_weight(cpu_map) >
-                               SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-                       sd = &per_cpu(allnodes_domains, i).sd;
-                       SD_INIT(sd, ALLNODES);
-                       set_domain_attribute(sd, attr);
-                       cpumask_copy(sched_domain_span(sd), cpu_map);
-                       cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
-                       p = sd;
-                       sd_allnodes = 1;
-               } else
-                       p = NULL;
+       struct sched_domain *parent;
 
-               sd = &per_cpu(node_domains, i).sd;
-               SD_INIT(sd, NODE);
+       d->sd_allnodes = 0;
+       if (cpumask_weight(cpu_map) >
+           SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+               sd = &per_cpu(allnodes_domains, i).sd;
+               SD_INIT(sd, ALLNODES);
                set_domain_attribute(sd, attr);
-               sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
-               sd->parent = p;
-               if (p)
-                       p->child = sd;
-               cpumask_and(sched_domain_span(sd),
-                           sched_domain_span(sd), cpu_map);
+               cpumask_copy(sched_domain_span(sd), cpu_map);
+               cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+               d->sd_allnodes = 1;
+       }
+       parent = sd;
+
+       sd = &per_cpu(node_domains, i).sd;
+       SD_INIT(sd, NODE);
+       set_domain_attribute(sd, attr);
+       sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+       sd->parent = parent;
+       if (parent)
+               parent->child = sd;
+       cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
 #endif
+       return sd;
+}
 
-               p = sd;
-               sd = &per_cpu(phys_domains, i).sd;
-               SD_INIT(sd, CPU);
-               set_domain_attribute(sd, attr);
-               cpumask_copy(sched_domain_span(sd), nodemask);
-               sd->parent = p;
-               if (p)
-                       p->child = sd;
-               cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
+static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+       struct sched_domain *parent, int i)
+{
+       struct sched_domain *sd;
+       sd = &per_cpu(phys_domains, i).sd;
+       SD_INIT(sd, CPU);
+       set_domain_attribute(sd, attr);
+       cpumask_copy(sched_domain_span(sd), d->nodemask);
+       sd->parent = parent;
+       if (parent)
+               parent->child = sd;
+       cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
+       return sd;
+}
 
+static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+       struct sched_domain *parent, int i)
+{
+       struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_MC
-               p = sd;
-               sd = &per_cpu(core_domains, i).sd;
-               SD_INIT(sd, MC);
-               set_domain_attribute(sd, attr);
-               cpumask_and(sched_domain_span(sd), cpu_map,
-                                                  cpu_coregroup_mask(i));
-               sd->parent = p;
-               p->child = sd;
-               cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+       sd = &per_cpu(core_domains, i).sd;
+       SD_INIT(sd, MC);
+       set_domain_attribute(sd, attr);
+       cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
+       sd->parent = parent;
+       parent->child = sd;
+       cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
 #endif
+       return sd;
+}
 
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+       struct sched_domain *parent, int i)
+{
+       struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_SMT
-               p = sd;
-               sd = &per_cpu(cpu_domains, i).sd;
-               SD_INIT(sd, SIBLING);
-               set_domain_attribute(sd, attr);
-               cpumask_and(sched_domain_span(sd),
-                           topology_thread_cpumask(i), cpu_map);
-               sd->parent = p;
-               p->child = sd;
-               cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+       sd = &per_cpu(cpu_domains, i).sd;
+       SD_INIT(sd, SIBLING);
+       set_domain_attribute(sd, attr);
+       cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+       sd->parent = parent;
+       parent->child = sd;
+       cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
 #endif
-       }
+       return sd;
+}
 
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+                              const struct cpumask *cpu_map, int cpu)
+{
+       switch (l) {
 #ifdef CONFIG_SCHED_SMT
-       /* Set up CPU (sibling) groups */
-       for_each_cpu(i, cpu_map) {
-               cpumask_and(this_sibling_map,
-                           topology_thread_cpumask(i), cpu_map);
-               if (i != cpumask_first(this_sibling_map))
-                       continue;
-
-               init_sched_build_groups(this_sibling_map, cpu_map,
-                                       &cpu_to_cpu_group,
-                                       send_covered, tmpmask);
-       }
+       case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+               cpumask_and(d->this_sibling_map, cpu_map,
+                           topology_thread_cpumask(cpu));
+               if (cpu == cpumask_first(d->this_sibling_map))
+                       init_sched_build_groups(d->this_sibling_map, cpu_map,
+                                               &cpu_to_cpu_group,
+                                               d->send_covered, d->tmpmask);
+               break;
 #endif
-
 #ifdef CONFIG_SCHED_MC
-       /* Set up multi-core groups */
-       for_each_cpu(i, cpu_map) {
-               cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
-               if (i != cpumask_first(this_core_map))
-                       continue;
-
-               init_sched_build_groups(this_core_map, cpu_map,
-                                       &cpu_to_core_group,
-                                       send_covered, tmpmask);
-       }
+       case SD_LV_MC: /* set up multi-core groups */
+               cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+               if (cpu == cpumask_first(d->this_core_map))
+                       init_sched_build_groups(d->this_core_map, cpu_map,
+                                               &cpu_to_core_group,
+                                               d->send_covered, d->tmpmask);
+               break;
 #endif
-
-       /* Set up physical groups */
-       for (i = 0; i < nr_node_ids; i++) {
-               cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-               if (cpumask_empty(nodemask))
-                       continue;
-
-               init_sched_build_groups(nodemask, cpu_map,
-                                       &cpu_to_phys_group,
-                                       send_covered, tmpmask);
-       }
-
+       case SD_LV_CPU: /* set up physical groups */
+               cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+               if (!cpumask_empty(d->nodemask))
+                       init_sched_build_groups(d->nodemask, cpu_map,
+                                               &cpu_to_phys_group,
+                                               d->send_covered, d->tmpmask);
+               break;
 #ifdef CONFIG_NUMA
-       /* Set up node groups */
-       if (sd_allnodes) {
-               init_sched_build_groups(cpu_map, cpu_map,
-                                       &cpu_to_allnodes_group,
-                                       send_covered, tmpmask);
+       case SD_LV_ALLNODES:
+               init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
+                                       d->send_covered, d->tmpmask);
+               break;
+#endif
+       default:
+               break;
        }
+}
 
-       for (i = 0; i < nr_node_ids; i++) {
-               /* Set up node groups */
-               struct sched_group *sg, *prev;
-               int j;
-
-               cpumask_clear(covered);
-               cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
-               if (cpumask_empty(nodemask)) {
-                       sched_group_nodes[i] = NULL;
-                       continue;
-               }
+/*
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
+ */
+static int __build_sched_domains(const struct cpumask *cpu_map,
+                                struct sched_domain_attr *attr)
+{
+       enum s_alloc alloc_state = sa_none;
+       struct s_data d;
+       struct sched_domain *sd;
+       int i;
+#ifdef CONFIG_NUMA
+       d.sd_allnodes = 0;
+#endif
 
-               sched_domain_node_span(i, domainspan);
-               cpumask_and(domainspan, domainspan, cpu_map);
+       alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+       if (alloc_state != sa_rootdomain)
+               goto error;
+       alloc_state = sa_sched_groups;
 
-               sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-                                 GFP_KERNEL, i);
-               if (!sg) {
-                       printk(KERN_WARNING "Can not alloc domain group for "
-                               "node %d\n", i);
-                       goto error;
-               }
-               sched_group_nodes[i] = sg;
-               for_each_cpu(j, nodemask) {
-                       struct sched_domain *sd;
+       /*
+        * Set up domains for cpus specified by the cpu_map.
+        */
+       for_each_cpu(i, cpu_map) {
+               cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+                           cpu_map);
 
-                       sd = &per_cpu(node_domains, j).sd;
-                       sd->groups = sg;
-               }
-               sg->__cpu_power = 0;
-               cpumask_copy(sched_group_cpus(sg), nodemask);
-               sg->next = sg;
-               cpumask_or(covered, covered, nodemask);
-               prev = sg;
+               sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
+               sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+               sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
+               sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
+       }
 
-               for (j = 0; j < nr_node_ids; j++) {
-                       int n = (i + j) % nr_node_ids;
+       for_each_cpu(i, cpu_map) {
+               build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+               build_sched_groups(&d, SD_LV_MC, cpu_map, i);
+       }
 
-                       cpumask_complement(notcovered, covered);
-                       cpumask_and(tmpmask, notcovered, cpu_map);
-                       cpumask_and(tmpmask, tmpmask, domainspan);
-                       if (cpumask_empty(tmpmask))
-                               break;
+       /* Set up physical groups */
+       for (i = 0; i < nr_node_ids; i++)
+               build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
 
-                       cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
-                       if (cpumask_empty(tmpmask))
-                               continue;
+#ifdef CONFIG_NUMA
+       /* Set up node groups */
+       if (d.sd_allnodes)
+               build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
 
-                       sg = kmalloc_node(sizeof(struct sched_group) +
-                                         cpumask_size(),
-                                         GFP_KERNEL, i);
-                       if (!sg) {
-                               printk(KERN_WARNING
-                               "Can not alloc domain group for node %d\n", j);
-                               goto error;
-                       }
-                       sg->__cpu_power = 0;
-                       cpumask_copy(sched_group_cpus(sg), tmpmask);
-                       sg->next = prev->next;
-                       cpumask_or(covered, covered, tmpmask);
-                       prev->next = sg;
-                       prev = sg;
-               }
-       }
+       for (i = 0; i < nr_node_ids; i++)
+               if (build_numa_sched_groups(&d, cpu_map, i))
+                       goto error;
 #endif
 
        /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
        for_each_cpu(i, cpu_map) {
-               struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
-
+               sd = &per_cpu(cpu_domains, i).sd;
                init_sched_groups_power(i, sd);
        }
 #endif
 #ifdef CONFIG_SCHED_MC
        for_each_cpu(i, cpu_map) {
-               struct sched_domain *sd = &per_cpu(core_domains, i).sd;
-
+               sd = &per_cpu(core_domains, i).sd;
                init_sched_groups_power(i, sd);
        }
 #endif
 
        for_each_cpu(i, cpu_map) {
-               struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
-
+               sd = &per_cpu(phys_domains, i).sd;
                init_sched_groups_power(i, sd);
        }
 
 #ifdef CONFIG_NUMA
        for (i = 0; i < nr_node_ids; i++)
-               init_numa_sched_groups_power(sched_group_nodes[i]);
+               init_numa_sched_groups_power(d.sched_group_nodes[i]);
 
-       if (sd_allnodes) {
+       if (d.sd_allnodes) {
                struct sched_group *sg;
 
                cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
-                                                               tmpmask);
+                                                               d.tmpmask);
                init_numa_sched_groups_power(sg);
        }
 #endif
 
        /* Attach the domains */
        for_each_cpu(i, cpu_map) {
-               struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
@@ -8755,44 +8926,16 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #else
                sd = &per_cpu(phys_domains, i).sd;
 #endif
-               cpu_attach_domain(sd, rd, i);
+               cpu_attach_domain(sd, d.rd, i);
        }
 
-       err = 0;
-
-free_tmpmask:
-       free_cpumask_var(tmpmask);
-free_send_covered:
-       free_cpumask_var(send_covered);
-free_this_core_map:
-       free_cpumask_var(this_core_map);
-free_this_sibling_map:
-       free_cpumask_var(this_sibling_map);
-free_nodemask:
-       free_cpumask_var(nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
-       free_cpumask_var(notcovered);
-free_covered:
-       free_cpumask_var(covered);
-free_domainspan:
-       free_cpumask_var(domainspan);
-out:
-#endif
-       return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
-       kfree(sched_group_nodes);
-#endif
-       goto free_tmpmask;
+       d.sched_group_nodes = NULL; /* don't free this, we still need it */
+       __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+       return 0;
 
-#ifdef CONFIG_NUMA
 error:
-       free_sched_groups(cpu_map, tmpmask);
-       free_rootdomain(rd);
-       goto free_tmpmask;
-#endif
+       __free_domain_allocs(&d, alloc_state, cpu_map);
+       return -ENOMEM;
 }
 
 static int build_sched_domains(const struct cpumask *cpu_map)
@@ -8800,7 +8943,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
        return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;       /* current sched domains */
+static cpumask_var_t *doms_cur;        /* current sched domains */
 static int ndoms_cur;          /* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
                                /* attributes of custom domains in 'doms_cur' */
@@ -8822,6 +8965,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
        return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+       int i;
+       cpumask_var_t *doms;
+
+       doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+       if (!doms)
+               return NULL;
+       for (i = 0; i < ndoms; i++) {
+               if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+                       free_sched_domains(doms, i);
+                       return NULL;
+               }
+       }
+       return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+       unsigned int i;
+       for (i = 0; i < ndoms; i++)
+               free_cpumask_var(doms[i]);
+       kfree(doms);
+}
+
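
Together with the reworked partition_sched_domains() later in this patch, the intended life cycle is: allocate the array of cpumask_var_t's, fill in the non-overlapping partitions, and hand ownership to the scheduler. A hedged sketch of such a caller follows; the two-way split and the function name are made up for illustration, and per the comment just below, the hotplug lock must be held across the call:

static void example_repartition(const struct cpumask *set_a,
				const struct cpumask *set_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	get_online_cpus();
	if (!doms) {
		/* allocation failed: ask for the single fallback partition */
		partition_sched_domains(1, NULL, NULL);
	} else {
		cpumask_copy(doms[0], set_a);
		cpumask_copy(doms[1], set_b);
		/* ownership of 'doms' passes to the scheduler here */
		partition_sched_domains(2, doms, NULL);
	}
	put_online_cpus();
}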
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8833,12 +9001,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
        arch_update_cpu_topology();
        ndoms_cur = 1;
-       doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+       doms_cur = alloc_sched_domains(ndoms_cur);
        if (!doms_cur)
-               doms_cur = fallback_doms;
-       cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+               doms_cur = &fallback_doms;
+       cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
        dattr_cur = NULL;
-       err = build_sched_domains(doms_cur);
+       err = build_sched_domains(doms_cur[0]);
        register_sched_domain_sysctl();
 
        return err;
@@ -8888,19 +9056,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fall back to the single partition
+ * 'fallback_doms'; this also forces the domains to be rebuilt.
  *
  * If doms_new == NULL it will be replaced with cpu_online_mask.
  * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8908,8 +9076,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                             struct sched_domain_attr *dattr_new)
 {
        int i, j, n;
@@ -8928,40 +9095,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
        /* Destroy deleted domains */
        for (i = 0; i < ndoms_cur; i++) {
                for (j = 0; j < n && !new_topology; j++) {
-                       if (cpumask_equal(&doms_cur[i], &doms_new[j])
+                       if (cpumask_equal(doms_cur[i], doms_new[j])
                            && dattrs_equal(dattr_cur, i, dattr_new, j))
                                goto match1;
                }
                /* no match - a current sched domain not in new doms_new[] */
-               detach_destroy_domains(doms_cur + i);
+               detach_destroy_domains(doms_cur[i]);
 match1:
                ;
        }
 
        if (doms_new == NULL) {
                ndoms_cur = 0;
-               doms_new = fallback_doms;
-               cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+               doms_new = &fallback_doms;
+               cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
                WARN_ON_ONCE(dattr_new);
        }
 
        /* Build new domains */
        for (i = 0; i < ndoms_new; i++) {
                for (j = 0; j < ndoms_cur && !new_topology; j++) {
-                       if (cpumask_equal(&doms_new[i], &doms_cur[j])
+                       if (cpumask_equal(doms_new[i], doms_cur[j])
                            && dattrs_equal(dattr_new, i, dattr_cur, j))
                                goto match2;
                }
                /* no match - add a new doms_new */
-               __build_sched_domains(doms_new + i,
+               __build_sched_domains(doms_new[i],
                                        dattr_new ? dattr_new + i : NULL);
 match2:
                ;
        }
 
        /* Remember the new sched domains */
-       if (doms_cur != fallback_doms)
-               kfree(doms_cur);
+       if (doms_cur != &fallback_doms)
+               free_sched_domains(doms_cur, ndoms_cur);
        kfree(dattr_cur);       /* kfree(NULL) is safe */
        doms_cur = doms_new;
        dattr_cur = dattr_new;
@@ -9072,8 +9239,10 @@ static int update_sched_domains(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                partition_sched_domains(1, NULL, NULL);
                return NOTIFY_OK;
 
@@ -9111,6 +9280,7 @@ void __init sched_init_smp(void)
        cpumask_var_t non_isolated_cpus;
 
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
        sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9119,7 +9289,7 @@ void __init sched_init_smp(void)
 #endif
        get_online_cpus();
        mutex_lock(&sched_domains_mutex);
-       arch_init_sched_domains(cpu_online_mask);
+       arch_init_sched_domains(cpu_active_mask);
        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
        if (cpumask_empty(non_isolated_cpus))
                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9142,7 +9312,6 @@ void __init sched_init_smp(void)
        sched_init_granularity();
        free_cpumask_var(non_isolated_cpus);
 
-       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
        init_sched_rt_class();
 }
 #else
@@ -9283,10 +9452,6 @@ void __init sched_init(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
        alloc_size += num_possible_cpus() * cpumask_size();
 #endif
-       /*
-        * As sched_init() is called before page_alloc is setup,
-        * we use alloc_bootmem().
-        */
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -9355,6 +9520,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+       update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+                                           __alignof__(unsigned long));
+#endif
        for_each_possible_cpu(i) {
                struct rq *rq;
 
@@ -9437,6 +9606,8 @@ void __init sched_init(void)
                rq->cpu = i;
                rq->online = 0;
                rq->migration_thread = NULL;
+               rq->idle_stamp = 0;
+               rq->avg_idle = 2*sysctl_sched_migration_cost;
                INIT_LIST_HEAD(&rq->migration_queue);
                rq_attach_root(rq, &def_root_domain);
 #endif
@@ -9480,16 +9651,18 @@ void __init sched_init(void)
        current->sched_class = &fair_sched_class;
 
        /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-       alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-       alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
        alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-       alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+       /* May be allocated at isolcpus cmdline parse time */
+       if (cpu_isolated_map == NULL)
+               zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-       perf_counter_init();
+       perf_event_init();
 
        scheduler_running = 1;
 }
@@ -10261,7 +10434,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
-               struct file *filp, void __user *buffer, size_t *lenp,
+               void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
        int ret;
@@ -10272,7 +10445,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
        old_period = sysctl_sched_rt_period;
        old_runtime = sysctl_sched_rt_runtime;
 
-       ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
        if (!ret && write) {
                ret = sched_rt_global_constraints();
@@ -10326,8 +10499,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                     struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10337,15 +10509,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
        if (tsk->sched_class != &fair_sched_class)
                return -EINVAL;
 #endif
+       return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                     struct task_struct *tsk, bool threadgroup)
+{
+       int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+       if (retval)
+               return retval;
+       if (threadgroup) {
+               struct task_struct *c;
+               rcu_read_lock();
+               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                       retval = cpu_cgroup_can_attach_task(cgrp, c);
+                       if (retval) {
+                               rcu_read_unlock();
+                               return retval;
+                       }
+               }
+               rcu_read_unlock();
+       }
        return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                       struct cgroup *old_cont, struct task_struct *tsk)
+                 struct cgroup *old_cont, struct task_struct *tsk,
+                 bool threadgroup)
 {
        sched_move_task(tsk);
+       if (threadgroup) {
+               struct task_struct *c;
+               rcu_read_lock();
+               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                       sched_move_task(c);
+               }
+               rcu_read_unlock();
+       }
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -10685,3 +10887,114 @@ struct cgroup_subsys cpuacct_subsys = {
        .subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST -2
+#define RCU_EXPEDITED_STATE_IDLE -1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+       int cnt = 0;
+       int cpu;
+
+       cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+       for_each_online_cpu(cpu) {
+                cnt += sprintf(&page[cnt], " %d:%d",
+                               cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+       }
+       cnt += sprintf(&page[cnt], "\n");
+       return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+       int cpu;
+       unsigned long flags;
+       bool need_full_sync = 0;
+       struct rq *rq;
+       struct migration_req *req;
+       long snap;
+       int trycount = 0;
+
+       smp_mb();  /* ensure prior mod happens before capturing snap. */
+       snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+       get_online_cpus();
+       while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+               put_online_cpus();
+               if (trycount++ < 10)
+                       udelay(trycount * num_online_cpus());
+               else {
+                       synchronize_sched();
+                       return;
+               }
+               if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+                       smp_mb(); /* ensure test happens before caller kfree */
+                       return;
+               }
+               get_online_cpus();
+       }
+       rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+       for_each_online_cpu(cpu) {
+               rq = cpu_rq(cpu);
+               req = &per_cpu(rcu_migration_req, cpu);
+               init_completion(&req->done);
+               req->task = NULL;
+               req->dest_cpu = RCU_MIGRATION_NEED_QS;
+               spin_lock_irqsave(&rq->lock, flags);
+               list_add(&req->list, &rq->migration_queue);
+               spin_unlock_irqrestore(&rq->lock, flags);
+               wake_up_process(rq->migration_thread);
+       }
+       for_each_online_cpu(cpu) {
+               rcu_expedited_state = cpu;
+               req = &per_cpu(rcu_migration_req, cpu);
+               rq = cpu_rq(cpu);
+               wait_for_completion(&req->done);
+               spin_lock_irqsave(&rq->lock, flags);
+               if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+                       need_full_sync = 1;
+               req->dest_cpu = RCU_MIGRATION_IDLE;
+               spin_unlock_irqrestore(&rq->lock, flags);
+       }
+       rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+       synchronize_sched_expedited_count++;
+       mutex_unlock(&rcu_sched_expedited_mutex);
+       put_online_cpus();
+       if (need_full_sync)
+               synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
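
As a usage note: synchronize_sched_expedited() is a drop-in, but far more expensive, substitute for synchronize_sched() for callers that cannot tolerate normal grace-period latency, and per the comment above it must not be called while holding any lock acquired by a CPU-hotplug notifier. A minimal, hypothetical example (the data structure and function are illustrative, not part of this patch):

struct item {
	struct item *next;
	int key;
};

static void remove_item(struct item **slot, struct item *old)
{
	rcu_assign_pointer(*slot, old->next);
	/* wait for all in-flight rcu-sched readers, as quickly as possible */
	synchronize_sched_expedited();
	kfree(old);
}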