[sysctl] Fix breakage on systems with older glibc
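
The bulk of the diff below converts the scheduler's runqueue and
RT-bandwidth locks from spinlock_t to raw_spinlock_t and renames the
lock operations accordingly (spin_lock() -> raw_spin_lock(), and so
on). As a minimal sketch of that conversion pattern only -- the
demo_rq/demo_runqueue names here are invented for illustration and are
not part of this patch -- the before/after usage looks roughly like
this:

    #include <linux/spinlock.h>

    /* Stand-in for a structure such as struct rq whose lock is converted. */
    struct demo_rq {
            raw_spinlock_t lock;            /* was: spinlock_t lock; */
            unsigned long nr_running;
    };

    static struct demo_rq demo_runqueue;

    static void demo_init(void)
    {
            /* was: spin_lock_init(&demo_runqueue.lock); */
            raw_spin_lock_init(&demo_runqueue.lock);
    }

    static void demo_update(void)
    {
            unsigned long flags;

            /* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
            raw_spin_lock_irqsave(&demo_runqueue.lock, flags);
            demo_runqueue.nr_running++;
            raw_spin_unlock_irqrestore(&demo_runqueue.lock, flags);
    }

The raw_ variants always busy-wait; the distinction matters mainly for
PREEMPT_RT, where plain spinlock_t can be substituted by a sleeping
lock, so locks that must keep spinning (such as rq->lock) are annotated
with the raw type.
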
diff --git a/kernel/sched.c b/kernel/sched.c
index b3d4e2b..18cceee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
        /* nests inside the rq lock: */
-       spinlock_t              rt_runtime_lock;
+       raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;
 
-       spin_lock_init(&rt_b->rt_runtime_lock);
+       raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (hrtimer_active(&rt_b->rt_period_timer))
                return;
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -470,7 +470,7 @@ struct rt_rq {
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-       spinlock_t rt_runtime_lock;
+       raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
        /* runqueue lock: */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        /*
         * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-       return spin_is_locked(&cpu_rq(cpu)->lock);
+       return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -892,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -916,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
        next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 #else
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -948,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
        for (;;) {
                struct rq *rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
        }
 }
 
@@ -968,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock_irqrestore(&rq->lock, *flags);
+               raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
 }
 
@@ -980,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
        struct rq *rq = task_rq(p);
 
        smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-       spin_unlock_wait(&rq->lock);
+       raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
 {
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
 {
-       spin_unlock_irqrestore(&rq->lock, *flags);
+       raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1005,7 +1006,7 @@ static struct rq *this_rq_lock(void)
 
        local_irq_disable();
        rq = this_rq();
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 
        return rq;
 }
@@ -1052,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        return HRTIMER_NORESTART;
 }
@@ -1068,10 +1069,10 @@ static void __hrtick_start(void *arg)
 {
        struct rq *rq = arg;
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1178,7 +1179,7 @@ static void resched_task(struct task_struct *p)
 {
        int cpu;
 
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
 
        if (test_tsk_need_resched(p))
                return;
@@ -1200,10 +1201,10 @@ static void resched_cpu(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!spin_trylock_irqsave(&rq->lock, flags))
+       if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1272,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 
@@ -1599,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
                struct rq *rq = cpu_rq(cpu);
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
                tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
                __set_se_shares(tg->se[cpu], shares);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 }
 
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-       unsigned long weight, rq_weight = 0, shares = 0;
+       unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
        unsigned long *usd_rq_weight;
        struct sched_domain *sd = data;
        unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
                weight = tg->cfs_rq[i]->load.weight;
                usd_rq_weight[i] = weight;
 
+               rq_weight += weight;
                /*
                 * If there are currently no tasks on the cpu pretend there
                 * is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
                if (!weight)
                        weight = NICE_0_LOAD;
 
-               rq_weight += weight;
+               sum_weight += weight;
                shares += tg->cfs_rq[i]->shares;
        }
 
+       if (!rq_weight)
+               rq_weight = sum_weight;
+
        if ((!shares && rq_weight) || shares > tg->shares)
                shares = tg->shares;
 
@@ -1701,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
        if (root_task_group_empty())
                return;
 
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
        update_shares(sd);
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1743,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
-       spin_unlock(&this_rq->lock);
+       raw_spin_unlock(&this_rq->lock);
        double_rq_lock(this_rq, busiest);
 
        return 1;
@@ -1764,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        int ret = 0;
 
-       if (unlikely(!spin_trylock(&busiest->lock))) {
+       if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
-                       spin_unlock(&this_rq->lock);
-                       spin_lock(&busiest->lock);
-                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_unlock(&this_rq->lock);
+                       raw_spin_lock(&busiest->lock);
+                       raw_spin_lock_nested(&this_rq->lock,
+                                             SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock_nested(&busiest->lock,
+                                             SINGLE_DEPTH_NESTING);
        }
        return ret;
 }
@@ -1785,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                BUG_ON(1);
        }
 
@@ -1795,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       spin_unlock(&busiest->lock);
+       raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -1810,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+       set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+       /*
+        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+        * successfully executed on another CPU. We must ensure that updates of
+        * per-task data have been completed by this moment.
+        */
+       smp_wmb();
+       task_thread_info(p)->cpu = cpu;
+#endif
+}
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -1967,20 +1990,6 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-       set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-       /*
-        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-        * successfuly executed on another CPU. We must ensure that updates of
-        * per-task data have been completed by this moment.
-        */
-       smp_wmb();
-       task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio, int running)
@@ -2016,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                return;
        }
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        update_rq_clock(rq);
        set_task_cpu(p, cpu);
        p->cpus_allowed = cpumask_of_cpu(cpu);
        p->rt.nr_cpus_allowed = 1;
        p->flags |= PF_THREAD_BOUND;
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2060,29 +2069,13 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
        int old_cpu = task_cpu(p);
-       struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
        struct cfs_rq *old_cfsrq = task_cfs_rq(p),
                      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
-       u64 clock_offset;
-
-       clock_offset = old_rq->clock - new_rq->clock;
 
        trace_sched_migrate_task(p, new_cpu);
 
-#ifdef CONFIG_SCHEDSTATS
-       if (p->se.wait_start)
-               p->se.wait_start -= clock_offset;
-       if (p->se.sleep_start)
-               p->se.sleep_start -= clock_offset;
-       if (p->se.block_start)
-               p->se.block_start -= clock_offset;
-#endif
        if (old_cpu != new_cpu) {
                p->se.nr_migrations++;
-#ifdef CONFIG_SCHEDSTATS
-               if (task_hot(p, old_rq->clock, NULL))
-                       schedstat_inc(p, se.nr_forced2_migrations);
-#endif
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
        }
@@ -2323,6 +2316,14 @@ void task_oncpu_function_call(struct task_struct *p,
        preempt_enable();
 }
 
+#ifdef CONFIG_SMP
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+       return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+}
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2374,17 +2375,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
-       task_rq_unlock(rq, &flags);
+       __task_rq_unlock(rq);
 
-       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (cpu != orig_cpu) {
-               local_irq_save(flags);
-               rq = cpu_rq(cpu);
-               update_rq_clock(rq);
+       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+       if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-               local_irq_restore(flags);
-       }
-       rq = task_rq_lock(p, &flags);
+
+       rq = __task_rq_lock(p);
+       update_rq_clock(rq);
 
        WARN_ON(p->state != TASK_WAKING);
        cpu = task_cpu(p);
@@ -2499,7 +2497,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
        p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
-       p->se.avg_running               = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                        = 0;
@@ -2521,7 +2518,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.nr_failed_migrations_running      = 0;
        p->se.nr_failed_migrations_hot          = 0;
        p->se.nr_forced_migrations              = 0;
-       p->se.nr_forced2_migrations             = 0;
 
        p->se.nr_wakeups                        = 0;
        p->se.nr_wakeups_sync                   = 0;
@@ -2558,7 +2554,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
        int cpu = get_cpu();
-       unsigned long flags;
 
        __sched_fork(p);
 
@@ -2592,13 +2587,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
+       if (p->sched_class->task_fork)
+               p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
-       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+       cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-       local_irq_save(flags);
-       update_rq_clock(cpu_rq(cpu));
        set_task_cpu(p, cpu);
-       local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
@@ -2631,17 +2626,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
        rq = task_rq_lock(p, &flags);
        BUG_ON(p->state != TASK_RUNNING);
        update_rq_clock(rq);
-
-       if (!p->sched_class->task_new || !current->se.on_rq) {
-               activate_task(rq, p, 0);
-       } else {
-               /*
-                * Let the scheduling class do new task startup
-                * management (if any):
-                */
-               p->sched_class->task_new(rq, p);
-               inc_nr_running(rq);
-       }
+       activate_task(rq, p, 0);
        trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2768,9 +2753,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        finish_arch_switch(prev);
        perf_event_task_sched_in(current, cpu_of(rq));
-       fire_sched_in_preempt_notifiers(current);
        finish_lock_switch(rq, prev);
 
+       fire_sched_in_preempt_notifiers(current);
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
@@ -2798,10 +2783,10 @@ static inline void post_schedule(struct rq *rq)
        if (rq->post_schedule) {
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
 
                rq->post_schedule = 0;
        }
@@ -3083,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
-               spin_lock(&rq1->lock);
+               raw_spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
                if (rq1 < rq2) {
-                       spin_lock(&rq1->lock);
-                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq1->lock);
+                       raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
-                       spin_lock(&rq2->lock);
-                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq2->lock);
+                       raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@ -3108,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       spin_unlock(&rq1->lock);
+       raw_spin_unlock(&rq1->lock);
        if (rq1 != rq2)
-               spin_unlock(&rq2->lock);
+               raw_spin_unlock(&rq2->lock);
        else
                __release(rq2->lock);
 }
@@ -3156,7 +3141,7 @@ out:
 void sched_exec(void)
 {
        int new_cpu, this_cpu = get_cpu();
-       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
+       new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
        put_cpu();
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
@@ -3172,10 +3157,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
        deactivate_task(src_rq, p, 0);
        set_task_cpu(p, this_cpu);
        activate_task(this_rq, p, 0);
-       /*
-        * Note that idle threads have a prio of MAX_PRIO, for this test
-        * to be always true for them.
-        */
        check_preempt_curr(this_rq, p, 0);
 }
 
@@ -4134,7 +4115,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        unsigned long flags;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_copy(cpus, cpu_online_mask);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4207,14 +4188,15 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock_irqsave(&busiest->lock, flags);
+                       raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                              &busiest->curr->cpus_allowed)) {
-                               spin_unlock_irqrestore(&busiest->lock, flags);
+                               raw_spin_unlock_irqrestore(&busiest->lock,
+                                                           flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -4224,7 +4206,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock_irqrestore(&busiest->lock, flags);
+                       raw_spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
 
@@ -4297,7 +4279,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        int all_pinned = 0;
        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-       cpumask_copy(cpus, cpu_online_mask);
+       cpumask_copy(cpus, cpu_active_mask);
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -4406,10 +4388,10 @@ redo:
                /*
                 * Should not call ttwu while holding a rq->lock
                 */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                if (active_balance)
                        wake_up_process(busiest->migration_thread);
-               spin_lock(&this_rq->lock);
+               raw_spin_lock(&this_rq->lock);
 
        } else
                sd->nr_balance_failed = 0;
@@ -4694,7 +4676,7 @@ int select_nohz_load_balancer(int stop_tick)
                cpumask_set_cpu(cpu, nohz.cpu_mask);
 
                /* time for ilb owner also to sleep */
-               if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+               if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
                                atomic_set(&nohz.load_balancer, -1);
                        return 0;
@@ -5184,10 +5166,18 @@ void account_idle_ticks(unsigned long ticks)
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       if (ut)
-               *ut = p->utime;
-       if (st)
-               *st = p->stime;
+       *ut = p->utime;
+       *st = p->stime;
+}
+
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+       struct task_cputime cputime;
+
+       thread_group_cputime(p, &cputime);
+
+       *ut = cputime.utime;
+       *st = cputime.stime;
 }
 #else
 
@@ -5197,7 +5187,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       cputime_t rtime, utime = p->utime, total = utime + p->stime;
+       cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
        /*
         * Use CFS's precise accounting:
@@ -5217,12 +5207,41 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         * Compare with previous values, to keep monotonicity:
         */
        p->prev_utime = max(p->prev_utime, utime);
-       p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+       p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-       if (ut)
-               *ut = p->prev_utime;
-       if (st)
-               *st = p->prev_stime;
+       *ut = p->prev_utime;
+       *st = p->prev_stime;
+}
+
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+       struct signal_struct *sig = p->signal;
+       struct task_cputime cputime;
+       cputime_t rtime, utime, total;
+
+       thread_group_cputime(p, &cputime);
+
+       total = cputime_add(cputime.utime, cputime.stime);
+       rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+       if (total) {
+               u64 temp;
+
+               temp = (u64)(rtime * cputime.utime);
+               do_div(temp, total);
+               utime = (cputime_t)temp;
+       } else
+               utime = rtime;
+
+       sig->prev_utime = max(sig->prev_utime, utime);
+       sig->prev_stime = max(sig->prev_stime,
+                             cputime_sub(rtime, sig->prev_utime));
+
+       *ut = sig->prev_utime;
+       *st = sig->prev_stime;
 }
 #endif
 
@@ -5241,11 +5260,11 @@ void scheduler_tick(void)
 
        sched_clock_tick();
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick(curr, cpu);
 
@@ -5359,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-       u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+       if (prev->state == TASK_RUNNING) {
+               u64 runtime = prev->se.sum_exec_runtime;
 
-       update_avg(&p->se.avg_running, runtime);
+               runtime -= prev->se.prev_sum_exec_runtime;
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-       if (p->state == TASK_RUNNING) {
                /*
                 * In order to avoid avg_overlap growing stale when we are
                 * indeed overlapping and hence not getting put to sleep, grow
@@ -5375,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
                 * correlates to the amount of cache footprint a task can
                 * build up.
                 */
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-               update_avg(&p->se.avg_overlap, runtime);
-       } else {
-               update_avg(&p->se.avg_running, 0);
+               update_avg(&prev->se.avg_overlap, runtime);
        }
-       p->sched_class->put_prev_task(rq, p);
+       prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
@@ -5441,7 +5458,7 @@ need_resched_nonpreemptible:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);
 
@@ -5477,7 +5494,7 @@ need_resched_nonpreemptible:
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
 
@@ -5490,7 +5507,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -6306,7 +6323,7 @@ recheck:
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
-       spin_lock_irqsave(&p->pi_lock, flags);
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -6316,7 +6333,7 @@ recheck:
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
-               spin_unlock_irqrestore(&p->pi_lock, flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
@@ -6340,7 +6357,7 @@ recheck:
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
-       spin_unlock_irqrestore(&p->pi_lock, flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
        rt_mutex_adjust_pi(p);
 
@@ -6594,6 +6611,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
        struct task_struct *p;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
 
        get_online_cpus();
@@ -6608,7 +6627,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        if (retval)
                goto out_unlock;
 
+       rq = task_rq_lock(p, &flags);
        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+       task_rq_unlock(rq, &flags);
 
 out_unlock:
        read_unlock(&tasklist_lock);
@@ -6666,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
         */
        __release(rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-       _raw_spin_unlock(&rq->lock);
+       do_raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
        schedule();
@@ -6846,6 +6867,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
        struct task_struct *p;
        unsigned int time_slice;
+       unsigned long flags;
+       struct rq *rq;
        int retval;
        struct timespec t;
 
@@ -6862,7 +6885,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;
 
-       time_slice = p->sched_class->get_rr_interval(p);
+       rq = task_rq_lock(p, &flags);
+       time_slice = p->sched_class->get_rr_interval(rq, p);
+       task_rq_unlock(rq, &flags);
 
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
@@ -6958,12 +6983,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
        idle->se.exec_start = sched_clock();
 
-       idle->prio = idle->normal_prio = MAX_PRIO;
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
        __set_task_cpu(idle, cpu);
 
@@ -6971,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
 #endif
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7004,22 +7028,43 @@ cpumask_var_t nohz_cpu_mask;
  *
  * This idea comes from the SD scheduler of Con Kolivas:
  */
-static inline void sched_init_granularity(void)
+static int get_update_sysctl_factor(void)
 {
-       unsigned int factor = 1 + ilog2(num_online_cpus());
-       const unsigned long limit = 200000000;
+       unsigned int cpus = min_t(int, num_online_cpus(), 8);
+       unsigned int factor;
+
+       switch (sysctl_sched_tunable_scaling) {
+       case SCHED_TUNABLESCALING_NONE:
+               factor = 1;
+               break;
+       case SCHED_TUNABLESCALING_LINEAR:
+               factor = cpus;
+               break;
+       case SCHED_TUNABLESCALING_LOG:
+       default:
+               factor = 1 + ilog2(cpus);
+               break;
+       }
 
-       sysctl_sched_min_granularity *= factor;
-       if (sysctl_sched_min_granularity > limit)
-               sysctl_sched_min_granularity = limit;
+       return factor;
+}
 
-       sysctl_sched_latency *= factor;
-       if (sysctl_sched_latency > limit)
-               sysctl_sched_latency = limit;
+static void update_sysctl(void)
+{
+       unsigned int factor = get_update_sysctl_factor();
 
-       sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+       (sysctl_##name = (factor) * normalized_sysctl_##name)
+       SET_SYSCTL(sched_min_granularity);
+       SET_SYSCTL(sched_latency);
+       SET_SYSCTL(sched_wakeup_granularity);
+       SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
 
-       sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+       update_sysctl();
 }
 
 #ifdef CONFIG_SMP
@@ -7056,7 +7101,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        int ret = 0;
 
        rq = task_rq_lock(p, &flags);
-       if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }
@@ -7078,7 +7123,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+       if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
                /* Need help from migration thread: drop lock and wait. */
                struct task_struct *mt = rq->migration_thread;
 
@@ -7167,10 +7212,10 @@ static int migration_thread(void *data)
                struct migration_req *req;
                struct list_head *head;
 
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
 
                if (cpu_is_offline(cpu)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        break;
                }
 
@@ -7182,7 +7227,7 @@ static int migration_thread(void *data)
                head = &rq->migration_queue;
 
                if (list_empty(head)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
@@ -7191,14 +7236,14 @@ static int migration_thread(void *data)
                list_del_init(head->next);
 
                if (req->task != NULL) {
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        __migrate_task(req->task, cpu, req->dest_cpu);
                } else if (likely(cpu == (badcpu = smp_processor_id()))) {
                        req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                } else {
                        req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
                }
                local_irq_enable();
@@ -7232,19 +7277,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
        /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                        goto move;
 
        /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
        if (dest_cpu < nr_cpu_ids)
                goto move;
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu >= nr_cpu_ids) {
                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
                /*
                 * Don't tell them about moving exiting tasks or
@@ -7273,7 +7318,7 @@ move:
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
        unsigned long flags;
 
        local_irq_save(flags);
@@ -7321,14 +7366,14 @@ void sched_idle_next(void)
         * Strictly not necessary since rest of the CPUs are stopped by now
         * and interrupts disabled on the current cpu.
         */
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
        update_rq_clock(rq);
        activate_task(rq, p, 0);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -7364,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         * that's OK. No task can be added to this CPU, so iteration is
         * fine.
         */
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
 
        put_task_struct(p);
 }
@@ -7407,17 +7452,16 @@ static struct ctl_table sd_ctl_dir[] = {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table sd_ctl_root[] = {
        {
-               .ctl_name       = CTL_KERN,
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7527,7 +7571,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-       int i, cpu_num = num_online_cpus();
+       int i, cpu_num = num_possible_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
@@ -7537,7 +7581,7 @@ static void register_sched_domain_sysctl(void)
        if (entry == NULL)
                return;
 
-       for_each_online_cpu(i) {
+       for_each_possible_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
@@ -7633,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
                        set_rq_online(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -7664,14 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
-               rq->idle->static_prio = MAX_PRIO;
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
@@ -7681,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 * they didn't take sched_hotcpu_mutex. Just wake up
                 * the requestors.
                 */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
                        struct migration_req *req;
 
                        req = list_entry(rq->migration_queue.next,
                                         struct migration_req, list);
                        list_del_init(&req->list);
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        complete(&req->done);
-                       spin_lock_irq(&rq->lock);
+                       raw_spin_lock_irq(&rq->lock);
                }
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                break;
 
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 #endif
        }
@@ -7934,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        struct root_domain *old_rd = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
                old_rd = rq->rd;
@@ -7960,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        if (old_rd)
                free_rootdomain(old_rd);
@@ -8061,6 +8104,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+       alloc_bootmem_cpumask_var(&cpu_isolated_map);
        cpulist_parse(str, cpu_isolated_map);
        return 1;
 }
@@ -8245,14 +8289,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                 struct sched_group **sg, struct cpumask *unused)
 {
        if (sg)
-               *sg = &per_cpu(sched_group_cpus, cpu).sg;
+               *sg = &per_cpu(sched_groups, cpu).sg;
        return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9062,7 +9106,7 @@ match1:
        if (doms_new == NULL) {
                ndoms_cur = 0;
                doms_new = &fallback_doms;
-               cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+               cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
                WARN_ON_ONCE(dattr_new);
        }
 
@@ -9193,8 +9237,10 @@ static int update_sched_domains(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
                partition_sched_domains(1, NULL, NULL);
                return NOTIFY_OK;
 
@@ -9241,7 +9287,7 @@ void __init sched_init_smp(void)
 #endif
        get_online_cpus();
        mutex_lock(&sched_domains_mutex);
-       arch_init_sched_domains(cpu_online_mask);
+       arch_init_sched_domains(cpu_active_mask);
        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
        if (cpumask_empty(non_isolated_cpus))
                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9314,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+       plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
-       spin_lock_init(&rt_rq->rt_runtime_lock);
+       raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->rt_nr_boosted = 0;
@@ -9480,7 +9526,7 @@ void __init sched_init(void)
                struct rq *rq;
 
                rq = cpu_rq(i);
-               spin_lock_init(&rq->lock);
+               raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9540,7 +9586,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                init_tg_rt_entry(&init_task_group,
-                               &per_cpu(init_rt_rq, i),
+                               &per_cpu(init_rt_rq_var, i),
                                &per_cpu(init_sched_rt_entity, i), i, 1,
                                root_task_group.rt_se[i]);
 #endif
@@ -9578,7 +9624,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
        /*
@@ -9609,7 +9655,9 @@ void __init sched_init(void)
        zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
        alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-       zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+       /* May be allocated at isolcpus cmdline parse time */
+       if (cpu_isolated_map == NULL)
+               zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
        perf_event_init();
@@ -9701,13 +9749,13 @@ void normalize_rt_tasks(void)
                        continue;
                }
 
-               spin_lock(&p->pi_lock);
+               raw_spin_lock(&p->pi_lock);
                rq = __task_rq_lock(p);
 
                normalize_task(rq, p);
 
                __task_rq_unlock(rq);
-               spin_unlock(&p->pi_lock);
+               raw_spin_unlock(&p->pi_lock);
        } while_each_thread(g, p);
 
        read_unlock_irqrestore(&tasklist_lock, flags);
@@ -9803,13 +9851,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                se = kzalloc_node(sizeof(struct sched_entity),
                                  GFP_KERNEL, cpu_to_node(i));
                if (!se)
-                       goto err;
+                       goto err_free_rq;
 
                init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
        }
 
        return 1;
 
+ err_free_rq:
+       kfree(cfs_rq);
  err:
        return 0;
 }
@@ -9891,13 +9941,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
-                       goto err;
+                       goto err_free_rq;
 
                init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
        }
 
        return 1;
 
+ err_free_rq:
+       kfree(rt_rq);
  err:
        return 0;
 }
@@ -10066,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
        struct rq *rq = cfs_rq->rq;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __set_se_shares(se, shares);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -10253,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg,
        if (err)
                goto unlock;
 
-       spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;
 
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
  unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
@@ -10369,15 +10421,15 @@ static int sched_rt_global_constraints(void)
        if (sysctl_sched_rt_runtime == 0)
                return -EBUSY;
 
-       spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
        return 0;
 }
@@ -10668,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        data = *cpuusage;
 #endif
@@ -10686,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        *cpuusage = val;
 #endif
@@ -10922,9 +10974,9 @@ void synchronize_sched_expedited(void)
                init_completion(&req->done);
                req->task = NULL;
                req->dest_cpu = RCU_MIGRATION_NEED_QS;
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                list_add(&req->list, &rq->migration_queue);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                wake_up_process(rq->migration_thread);
        }
        for_each_online_cpu(cpu) {
@@ -10932,13 +10984,14 @@ void synchronize_sched_expedited(void)
                req = &per_cpu(rcu_migration_req, cpu);
                rq = cpu_rq(cpu);
                wait_for_completion(&req->done);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
                        need_full_sync = 1;
                req->dest_cpu = RCU_MIGRATION_IDLE;
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+       synchronize_sched_expedited_count++;
        mutex_unlock(&rcu_sched_expedited_mutex);
        put_online_cpus();
        if (need_full_sync)