Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6
diff --git a/kernel/sched.c b/kernel/sched.c
index 475a6f2..fd05861 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-       unsigned long weight, rq_weight = 0, shares = 0;
+       unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
        unsigned long *usd_rq_weight;
        struct sched_domain *sd = data;
        unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
                weight = tg->cfs_rq[i]->load.weight;
                usd_rq_weight[i] = weight;
 
+               rq_weight += weight;
                /*
                 * If there are currently no tasks on the cpu pretend there
                 * is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
                if (!weight)
                        weight = NICE_0_LOAD;
 
-               rq_weight += weight;
+               sum_weight += weight;
                shares += tg->cfs_rq[i]->shares;
        }
 
+       if (!rq_weight)
+               rq_weight = sum_weight;
+
        if ((!shares && rq_weight) || shares > tg->shares)
                shares = tg->shares;
 
@@ -1810,6 +1815,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+       set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+       /*
+        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+        * successfully executed on another CPU. We must ensure that updates of
+        * per-task data have been completed by this moment.
+        */
+       smp_wmb();
+       task_thread_info(p)->cpu = cpu;
+#endif
+}
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -1967,20 +1988,6 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-       set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-       /*
-        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-        * successfuly executed on another CPU. We must ensure that updates of
-        * per-task data have been completed by this moment.
-        */
-       smp_wmb();
-       task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio, int running)
@@ -2060,29 +2067,13 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
        int old_cpu = task_cpu(p);
-       struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
        struct cfs_rq *old_cfsrq = task_cfs_rq(p),
                      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
-       u64 clock_offset;
-
-       clock_offset = old_rq->clock - new_rq->clock;
 
        trace_sched_migrate_task(p, new_cpu);
 
-#ifdef CONFIG_SCHEDSTATS
-       if (p->se.wait_start)
-               p->se.wait_start -= clock_offset;
-       if (p->se.sleep_start)
-               p->se.sleep_start -= clock_offset;
-       if (p->se.block_start)
-               p->se.block_start -= clock_offset;
-#endif
        if (old_cpu != new_cpu) {
                p->se.nr_migrations++;
-#ifdef CONFIG_SCHEDSTATS
-               if (task_hot(p, old_rq->clock, NULL))
-                       schedstat_inc(p, se.nr_forced2_migrations);
-#endif
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
        }
@@ -2323,6 +2314,14 @@ void task_oncpu_function_call(struct task_struct *p,
        preempt_enable();
 }
 
+#ifdef CONFIG_SMP
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+       return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+}
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2374,17 +2373,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
-       task_rq_unlock(rq, &flags);
+       __task_rq_unlock(rq);
 
-       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (cpu != orig_cpu) {
-               local_irq_save(flags);
-               rq = cpu_rq(cpu);
-               update_rq_clock(rq);
+       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+       if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-               local_irq_restore(flags);
-       }
-       rq = task_rq_lock(p, &flags);
+
+       rq = __task_rq_lock(p);
+       update_rq_clock(rq);
 
        WARN_ON(p->state != TASK_WAKING);
        cpu = task_cpu(p);
@@ -2499,7 +2495,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
        p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
-       p->se.avg_running               = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                        = 0;
@@ -2521,7 +2516,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.nr_failed_migrations_running      = 0;
        p->se.nr_failed_migrations_hot          = 0;
        p->se.nr_forced_migrations              = 0;
-       p->se.nr_forced2_migrations             = 0;
 
        p->se.nr_wakeups                        = 0;
        p->se.nr_wakeups_sync                   = 0;
@@ -2558,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
        int cpu = get_cpu();
-       unsigned long flags;
 
        __sched_fork(p);
 
@@ -2592,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
+       if (p->sched_class->task_fork)
+               p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
-       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+       cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-       local_irq_save(flags);
-       update_rq_clock(cpu_rq(cpu));
        set_task_cpu(p, cpu);
-       local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
@@ -2631,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
        rq = task_rq_lock(p, &flags);
        BUG_ON(p->state != TASK_RUNNING);
        update_rq_clock(rq);
-
-       if (!p->sched_class->task_new || !current->se.on_rq) {
-               activate_task(rq, p, 0);
-       } else {
-               /*
-                * Let the scheduling class do new task startup
-                * management (if any):
-                */
-               p->sched_class->task_new(rq, p);
-               inc_nr_running(rq);
-       }
+       activate_task(rq, p, 0);
        trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2768,9 +2751,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        finish_arch_switch(prev);
        perf_event_task_sched_in(current, cpu_of(rq));
-       fire_sched_in_preempt_notifiers(current);
        finish_lock_switch(rq, prev);
 
+       fire_sched_in_preempt_notifiers(current);
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
@@ -3156,7 +3139,7 @@ out:
 void sched_exec(void)
 {
        int new_cpu, this_cpu = get_cpu();
-       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
+       new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
        put_cpu();
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
@@ -3172,10 +3155,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
        deactivate_task(src_rq, p, 0);
        set_task_cpu(p, this_cpu);
        activate_task(this_rq, p, 0);
-       /*
-        * Note that idle threads have a prio of MAX_PRIO, for this test
-        * to be always true for them.
-        */
        check_preempt_curr(this_rq, p, 0);
 }
 
@@ -4134,7 +4113,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        unsigned long flags;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_copy(cpus, cpu_online_mask);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4297,7 +4276,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        int all_pinned = 0;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_copy(cpus, cpu_online_mask);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4694,7 +4673,7 @@ int select_nohz_load_balancer(int stop_tick)
                cpumask_set_cpu(cpu, nohz.cpu_mask);
 
                /* time for ilb owner also to sleep */
-               if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+               if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
                                atomic_set(&nohz.load_balancer, -1);
                        return 0;
@@ -5182,33 +5161,30 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       return p->utime;
+       *ut = p->utime;
+       *st = p->stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       return p->stime;
-}
+       struct task_cputime cputime;
 
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-       if (ut)
-               *ut = task_utime(p);
-       if (st)
-               *st = task_stime(p);
+       thread_group_cputime(p, &cputime);
+
+       *ut = cputime.utime;
+       *st = cputime.stime;
 }
 #else
 
 #ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs) \
-       msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
+# define nsecs_to_cputime(__nsecs)     nsecs_to_jiffies(__nsecs)
 #endif
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       cputime_t rtime, utime = p->utime, total = utime + p->stime;
+       cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
        /*
         * Use CFS's precise accounting:
@@ -5228,33 +5204,43 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         * Compare with previous values, to keep monotonicity:
         */
        p->prev_utime = max(p->prev_utime, utime);
-       p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+       p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-       if (ut)
-               *ut = p->prev_utime;
-       if (st)
-               *st = p->prev_stime;
+       *ut = p->prev_utime;
+       *st = p->prev_stime;
 }
 
-cputime_t task_utime(struct task_struct *p)
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       cputime_t utime;
-       task_times(p, &utime, NULL);
-       return utime;
-}
+       struct signal_struct *sig = p->signal;
+       struct task_cputime cputime;
+       cputime_t rtime, utime, total;
 
-cputime_t task_stime(struct task_struct *p)
-{
-       cputime_t stime;
-       task_times(p, NULL, &stime);
-       return stime;
-}
-#endif
+       thread_group_cputime(p, &cputime);
 
-inline cputime_t task_gtime(struct task_struct *p)
-{
-       return p->gtime;
+       total = cputime_add(cputime.utime, cputime.stime);
+       rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+       if (total) {
+               u64 temp;
+
+               temp = (u64)(rtime * cputime.utime);
+               do_div(temp, total);
+               utime = (cputime_t)temp;
+       } else
+               utime = rtime;
+
+       sig->prev_utime = max(sig->prev_utime, utime);
+       sig->prev_stime = max(sig->prev_stime,
+                             cputime_sub(rtime, sig->prev_utime));
+
+       *ut = sig->prev_utime;
+       *st = sig->prev_stime;
 }
+#endif
 
 /*
  * This function gets called by the timer code, with HZ frequency.
@@ -5389,13 +5375,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-       u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+       if (prev->state == TASK_RUNNING) {
+               u64 runtime = prev->se.sum_exec_runtime;
 
-       update_avg(&p->se.avg_running, runtime);
+               runtime -= prev->se.prev_sum_exec_runtime;
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-       if (p->state == TASK_RUNNING) {
                /*
                 * In order to avoid avg_overlap growing stale when we are
                 * indeed overlapping and hence not getting put to sleep, grow
@@ -5405,12 +5392,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
                 * correlates to the amount of cache footprint a task can
                 * build up.
                 */
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-               update_avg(&p->se.avg_overlap, runtime);
-       } else {
-               update_avg(&p->se.avg_running, 0);
+               update_avg(&prev->se.avg_overlap, runtime);
        }
-       p->sched_class->put_prev_task(rq, p);
+       prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
@@ -5520,7 +5504,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -6624,6 +6608,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
        struct task_struct *p;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
 
        get_online_cpus();
@@ -6638,7 +6624,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        if (retval)
                goto out_unlock;
 
+       rq = task_rq_lock(p, &flags);
        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+       task_rq_unlock(rq, &flags);
 
 out_unlock:
        read_unlock(&tasklist_lock);
@@ -6876,6 +6864,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
        struct task_struct *p;
        unsigned int time_slice;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
        struct timespec t;
 
@@ -6892,7 +6882,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;
 
-       time_slice = p->sched_class->get_rr_interval(p);
+       rq = task_rq_lock(p, &flags);
+       time_slice = p->sched_class->get_rr_interval(rq, p);
+       task_rq_unlock(rq, &flags);
 
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
@@ -6993,7 +6985,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        __sched_fork(idle);
        idle->se.exec_start = sched_clock();
 
-       idle->prio = idle->normal_prio = MAX_PRIO;
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
        __set_task_cpu(idle, cpu);
 
@@ -7034,22 +7025,43 @@ cpumask_var_t nohz_cpu_mask;
  *
  * This idea comes from the SD scheduler of Con Kolivas:
  */
-static inline void sched_init_granularity(void)
+static int get_update_sysctl_factor(void)
 {
-       unsigned int factor = 1 + ilog2(num_online_cpus());
-       const unsigned long limit = 200000000;
+       unsigned int cpus = min_t(int, num_online_cpus(), 8);
+       unsigned int factor;
 
-       sysctl_sched_min_granularity *= factor;
-       if (sysctl_sched_min_granularity > limit)
-               sysctl_sched_min_granularity = limit;
+       switch (sysctl_sched_tunable_scaling) {
+       case SCHED_TUNABLESCALING_NONE:
+               factor = 1;
+               break;
+       case SCHED_TUNABLESCALING_LINEAR:
+               factor = cpus;
+               break;
+       case SCHED_TUNABLESCALING_LOG:
+       default:
+               factor = 1 + ilog2(cpus);
+               break;
+       }
 
-       sysctl_sched_latency *= factor;
-       if (sysctl_sched_latency > limit)
-               sysctl_sched_latency = limit;
+       return factor;
+}
+
+static void update_sysctl(void)
+{
+       unsigned int factor = get_update_sysctl_factor();
 
-       sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+       (sysctl_##name = (factor) * normalized_sysctl_##name)
+       SET_SYSCTL(sched_min_granularity);
+       SET_SYSCTL(sched_latency);
+       SET_SYSCTL(sched_wakeup_granularity);
+       SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
 
-       sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+       update_sysctl();
 }
 
 #ifdef CONFIG_SMP
@@ -7086,7 +7098,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        int ret = 0;
 
        rq = task_rq_lock(p, &flags);
-       if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }
@@ -7108,7 +7120,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+       if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
                /* Need help from migration thread: drop lock and wait. */
                struct task_struct *mt = rq->migration_thread;
 
@@ -7262,19 +7274,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
        /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                        goto move;
 
        /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
        if (dest_cpu < nr_cpu_ids)
                goto move;
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu >= nr_cpu_ids) {
                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
                /*
                 * Don't tell them about moving exiting tasks or
@@ -7303,7 +7315,7 @@ move:
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
        unsigned long flags;
 
        local_irq_save(flags);
@@ -7437,17 +7449,16 @@ static struct ctl_table sd_ctl_dir[] = {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table sd_ctl_root[] = {
        {
-               .ctl_name       = CTL_KERN,
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7557,7 +7568,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-       int i, cpu_num = num_online_cpus();
+       int i, cpu_num = num_possible_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
@@ -7567,7 +7578,7 @@ static void register_sched_domain_sysctl(void)
        if (entry == NULL)
                return;
 
-       for_each_online_cpu(i) {
+       for_each_possible_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
@@ -7697,7 +7708,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
-               rq->idle->static_prio = MAX_PRIO;
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
@@ -8091,6 +8101,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+       alloc_bootmem_cpumask_var(&cpu_isolated_map);
        cpulist_parse(str, cpu_isolated_map);
        return 1;
 }
@@ -8275,14 +8286,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                 struct sched_group **sg, struct cpumask *unused)
 {
        if (sg)
-               *sg = &per_cpu(sched_group_cpus, cpu).sg;
+               *sg = &per_cpu(sched_groups, cpu).sg;
        return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9092,7 +9103,7 @@ match1:
        if (doms_new == NULL) {
                ndoms_cur = 0;
                doms_new = &fallback_doms;
-               cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+               cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
                WARN_ON_ONCE(dattr_new);
        }
 
@@ -9223,8 +9234,10 @@ static int update_sched_domains(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                partition_sched_domains(1, NULL, NULL);
                return NOTIFY_OK;
 
@@ -9271,7 +9284,7 @@ void __init sched_init_smp(void)
 #endif
        get_online_cpus();
        mutex_lock(&sched_domains_mutex);
-       arch_init_sched_domains(cpu_online_mask);
+       arch_init_sched_domains(cpu_active_mask);
        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
        if (cpumask_empty(non_isolated_cpus))
                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9570,7 +9583,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                init_tg_rt_entry(&init_task_group,
-                               &per_cpu(init_rt_rq, i),
+                               &per_cpu(init_rt_rq_var, i),
                                &per_cpu(init_sched_rt_entity, i), i, 1,
                                root_task_group.rt_se[i]);
 #endif
@@ -9639,7 +9652,9 @@ void __init sched_init(void)
        zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
        alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-       zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+       /* May be allocated at isolcpus cmdline parse time */
+       if (cpu_isolated_map == NULL)
+               zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
        perf_event_init();
@@ -9833,13 +9848,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                se = kzalloc_node(sizeof(struct sched_entity),
                                  GFP_KERNEL, cpu_to_node(i));
                if (!se)
-                       goto err;
+                       goto err_free_rq;
 
                init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
        }
 
        return 1;
 
+ err_free_rq:
+       kfree(cfs_rq);
  err:
        return 0;
 }
@@ -9921,13 +9938,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
-                       goto err;
+                       goto err_free_rq;
 
                init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
        }
 
        return 1;
 
+ err_free_rq:
+       kfree(rt_rq);
  err:
        return 0;
 }
@@ -10969,6 +10988,7 @@ void synchronize_sched_expedited(void)
                spin_unlock_irqrestore(&rq->lock, flags);
        }
        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+       synchronize_sched_expedited_count++;
        mutex_unlock(&rcu_sched_expedited_mutex);
        put_online_cpus();
        if (need_full_sync)