sched: Reassign prev and switch_count when reacquire_kernel_lock() fails
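
The headline fix is in the schedule() tail near the end of this diff: when reacquire_kernel_lock() fails and we jump back to the need_resched_nonpreemptible label, prev and switch_count still point at the task that was current before the context switch, so they must be refreshed from rq->curr. A minimal sketch of the fixed control flow, abridged from that hunk (surrounding code elided):

	/* tail of schedule(), after the context switch and post_schedule() */
	if (unlikely(reacquire_kernel_lock(current) < 0)) {
		/* we now run as the new current task; refresh the loop state */
		prev = rq->curr;
		switch_count = &prev->nivcsw;
		goto need_resched_nonpreemptible;
	}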
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dbe617..4508fe7 100644
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
        /* nests inside the rq lock: */
-       spinlock_t              rt_runtime_lock;
+       raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;
 
-       spin_lock_init(&rt_b->rt_runtime_lock);
+       raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (hrtimer_active(&rt_b->rt_period_timer))
                return;
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +470,7 @@ struct rt_rq {
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-       spinlock_t rt_runtime_lock;
+       raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;
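
Several hunks here (rt_runtime_lock above, and pi_lock further down) convert spinlock_t to raw_spinlock_t; the raw_spin_* calls mirror the spin_* API one-for-one, and the distinction matters for the -rt patch set, where spinlock_t may become a sleeping lock while raw_spinlock_t keeps spinning. A minimal usage sketch with a hypothetical lock and field (not taken from this file):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */
static u64 example_runtime;			/* hypothetical */

static void example_set_runtime(u64 runtime)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_runtime = runtime;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}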
@@ -2002,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
-/**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @p: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- *
- * Function lives here instead of kthread.c because it messes with
- * scheduler internals which require locking.
- */
-void kthread_bind(struct task_struct *p, unsigned int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long flags;
-
-       /* Must have done schedule() in kthread() before we set_task_cpu */
-       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-               WARN_ON(1);
-               return;
-       }
-
-       raw_spin_lock_irqsave(&rq->lock, flags);
-       update_rq_clock(rq);
-       set_task_cpu(p, cpu);
-       p->cpus_allowed = cpumask_of_cpu(cpu);
-       p->rt.nr_cpus_allowed = 1;
-       p->flags |= PF_THREAD_BOUND;
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-EXPORT_SYMBOL(kthread_bind);
-
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2044,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
        s64 delta;
 
+       if (p->sched_class != &fair_sched_class)
+               return 0;
+
        /*
         * Buddy candidates are cache hot:
         */
@@ -2052,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
 
-       if (p->sched_class != &fair_sched_class)
-               return 0;
-
        if (sysctl_sched_migration_cost == -1)
                return 1;
        if (sysctl_sched_migration_cost == 0)
@@ -2065,22 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        return delta < (s64)sysctl_sched_migration_cost;
 }
 
-
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-       int old_cpu = task_cpu(p);
-       struct cfs_rq *old_cfsrq = task_cfs_rq(p),
-                     *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
+#ifdef CONFIG_SCHED_DEBUG
+       /*
+        * We should never call set_task_cpu() on a blocked task,
+        * ttwu() will sort out the placement.
+        */
+       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+                       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+#endif
 
        trace_sched_migrate_task(p, new_cpu);
 
-       if (old_cpu != new_cpu) {
+       if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
-                                    1, 1, NULL, 0);
+               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
        }
-       p->se.vruntime -= old_cfsrq->min_vruntime -
-                                        new_cfsrq->min_vruntime;
 
        __set_task_cpu(p, new_cpu);
 }
@@ -2105,13 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 
        /*
         * If the task is not on a runqueue (and not running), then
-        * it is sufficient to simply update the task's cpu field.
+        * the next wake-up will properly place the task.
         */
-       if (!p->se.on_rq && !task_running(rq, p)) {
-               update_rq_clock(rq);
-               set_task_cpu(p, dest_cpu);
+       if (!p->se.on_rq && !task_running(rq, p))
                return 0;
-       }
 
        init_completion(&req->done);
        req->task = p;
@@ -2317,10 +2282,73 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+       int dest_cpu;
+       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+       /* Look for allowed, online CPU in same node. */
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       return dest_cpu;
+
+       /* Any allowed, online CPU? */
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+       if (dest_cpu < nr_cpu_ids)
+               return dest_cpu;
+
+       /* No more Mr. Nice Guy. */
+       if (dest_cpu >= nr_cpu_ids) {
+               rcu_read_lock();
+               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+               rcu_read_unlock();
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+               /*
+                * Don't tell them about moving exiting tasks or
+                * kernel threads (both mm NULL), since they never
+                * leave kernel.
+                */
+               if (p->mm && printk_ratelimit()) {
+                       printk(KERN_INFO "process %d (%s) no "
+                              "longer affine to cpu%d\n",
+                              task_pid_nr(p), p->comm, cpu);
+               }
+       }
+
+       return dest_cpu;
+}
+
+/*
+ * Called from:
+ *
+ *  - fork, @p is stable because it isn't on the tasklist yet
+ *
+ *  - exec, @p is unstable, retry loop
+ *
+ *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
+ *             we should be good.
+ */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-       return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+       int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+
+       /*
+        * In order not to call set_task_cpu() on a blocking task we need
+        * to rely on ttwu() to place the task on a valid ->cpus_allowed
+        * cpu.
+        *
+        * Since this is common to all placement strategies, this lives here.
+        *
+        * [ this allows ->select_task() to simply return task_cpu(p) and
+        *   not worry about this generic constraint ]
+        */
+       if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+                    !cpu_online(cpu)))
+               cpu = select_fallback_rq(task_cpu(p), p);
+
+       return cpu;
 }
 #endif
 
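
The select_fallback_rq()/select_task_rq() pair above relies on the cpumask convention that an empty intersection yields a CPU number >= nr_cpu_ids, which is why "dest_cpu < nr_cpu_ids" is the success test. A tiny hypothetical helper illustrating the same convention (names are illustrative, not from this file):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* hypothetical: return an allowed, active CPU for @p, or -1 if none is left */
static int example_pick_allowed_cpu(struct task_struct *p)
{
	int cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);

	/* an empty intersection returns a value >= nr_cpu_ids */
	return cpu < nr_cpu_ids ? cpu : -1;
}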
@@ -2375,6 +2403,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
+
+       if (p->sched_class->task_waking)
+               p->sched_class->task_waking(rq, p);
+
        __task_rq_unlock(rq);
 
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
@@ -2438,8 +2470,8 @@ out_running:
 
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_wake_up)
-               p->sched_class->task_wake_up(rq, p);
+       if (p->sched_class->task_woken)
+               p->sched_class->task_woken(rq, p);
 
        if (unlikely(rq->idle_stamp)) {
                u64 delta = rq->clock - rq->idle_stamp;
@@ -2538,14 +2570,6 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
-
-       /*
-        * We mark the process as running here, but have not actually
-        * inserted it onto the runqueue yet. This guarantees that
-        * nobody will actually run it, and a signal or other external
-        * event cannot wake it up and insert it on the runqueue either.
-        */
-       p->state = TASK_RUNNING;
 }
 
 /*
@@ -2556,6 +2580,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
        int cpu = get_cpu();
 
        __sched_fork(p);
+       /*
+        * We mark the process as waking here. This guarantees that
+        * nobody will actually run it, and a signal or other external
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_WAKING;
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2624,14 +2654,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
        struct rq *rq;
 
        rq = task_rq_lock(p, &flags);
-       BUG_ON(p->state != TASK_RUNNING);
+       BUG_ON(p->state != TASK_WAKING);
+       p->state = TASK_RUNNING;
        update_rq_clock(rq);
        activate_task(rq, p, 0);
        trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_wake_up)
-               p->sched_class->task_wake_up(rq, p);
+       if (p->sched_class->task_woken)
+               p->sched_class->task_woken(rq, p);
 #endif
        task_rq_unlock(rq, &flags);
 }
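
Taken together, the __sched_fork(), sched_fork() and wake_up_new_task() hunks above (plus the init_idle() hunk further down) change the fork-time protocol: a new task is created in TASK_WAKING and only becomes TASK_RUNNING once it is actually placed on a runqueue. A sketch of the resulting lifecycle, assuming the usual do_fork() call path:

/*
 * copy_process()
 *   sched_fork(p)          p->state = TASK_WAKING   (not runnable yet)
 * ...
 * wake_up_new_task(p)      BUG_ON(p->state != TASK_WAKING);
 *                          p->state = TASK_RUNNING;
 *                          activate_task(rq, p, 0);
 *
 * init_idle() sets TASK_RUNNING by hand because the idle threads are never
 * woken through wake_up_new_task().
 */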
@@ -3101,21 +3132,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+       struct task_struct *p = current;
        struct migration_req req;
+       int dest_cpu, this_cpu;
        unsigned long flags;
        struct rq *rq;
 
+again:
+       this_cpu = get_cpu();
+       dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+       if (dest_cpu == this_cpu) {
+               put_cpu();
+               return;
+       }
+
        rq = task_rq_lock(p, &flags);
+       put_cpu();
+
+       /*
+        * select_task_rq() can race against ->cpus_allowed
+        */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-           || unlikely(!cpu_active(dest_cpu)))
-               goto out;
+           || unlikely(!cpu_active(dest_cpu))) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
 
        /* force the process onto the specified CPU */
        if (migrate_task(p, dest_cpu, &req)) {
@@ -3130,24 +3176,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
                return;
        }
-out:
        task_rq_unlock(rq, &flags);
 }
 
 /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-       int new_cpu, this_cpu = get_cpu();
-       new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
-       put_cpu();
-       if (new_cpu != this_cpu)
-               sched_migrate_task(current, new_cpu);
-}
-
-/*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
@@ -5498,8 +5530,11 @@ need_resched_nonpreemptible:
 
        post_schedule(rq);
 
-       if (unlikely(reacquire_kernel_lock(current) < 0))
+       if (unlikely(reacquire_kernel_lock(current) < 0)) {
+               prev = rq->curr;
+               switch_count = &prev->nivcsw;
                goto need_resched_nonpreemptible;
+       }
 
        preempt_enable_no_resched();
        if (need_resched())
@@ -5911,14 +5946,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  */
 bool try_wait_for_completion(struct completion *x)
 {
+       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irq(&x->wait.lock);
+       spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
        else
                x->done--;
-       spin_unlock_irq(&x->wait.lock);
+       spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -5933,12 +5969,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
+       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irq(&x->wait.lock);
+       spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
-       spin_unlock_irq(&x->wait.lock);
+       spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(completion_done);
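
The irqsave conversion in try_wait_for_completion() and completion_done() above matters because spin_unlock_irq() unconditionally re-enables interrupts, while the irqsave/irqrestore pair preserves the caller's IRQ state; that keeps both helpers usable from contexts that already run with interrupts disabled. A sketch of such a caller (hypothetical driver code, not from this diff):

#include <linux/interrupt.h>
#include <linux/completion.h>

/* hypothetical hard-IRQ handler polling a completion passed in as dev_id */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct completion *done = dev_id;

	/*
	 * Interrupts are disabled here; with the irqsave variants the
	 * completion helpers no longer re-enable them behind our back.
	 */
	if (try_wait_for_completion(done))
		return IRQ_HANDLED;

	return IRQ_NONE;
}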
@@ -6323,7 +6360,7 @@ recheck:
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
-       spin_lock_irqsave(&p->pi_lock, flags);
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -6333,7 +6370,7 @@ recheck:
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
-               spin_unlock_irqrestore(&p->pi_lock, flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
@@ -6357,7 +6394,7 @@ recheck:
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
-       spin_unlock_irqrestore(&p->pi_lock, flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
        rt_mutex_adjust_pi(p);
 
@@ -6457,7 +6494,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                return -EINVAL;
 
        retval = -ESRCH;
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        if (p) {
                retval = security_task_getscheduler(p);
@@ -6465,7 +6502,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                        retval = p->policy
                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
        }
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6483,7 +6520,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (!param || pid < 0)
                return -EINVAL;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
@@ -6494,7 +6531,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
                goto out_unlock;
 
        lp.sched_priority = p->rt_priority;
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        /*
         * This one might sleep, we cannot do it with a spinlock held ...
@@ -6504,7 +6541,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        return retval;
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6515,22 +6552,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        int retval;
 
        get_online_cpus();
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
 
        p = find_process_by_pid(pid);
        if (!p) {
-               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
                put_online_cpus();
                return -ESRCH;
        }
 
-       /*
-        * It is not safe to call set_cpus_allowed with the
-        * tasklist_lock held. We will bump the task_struct's
-        * usage count and then drop tasklist_lock.
-        */
+       /* Prevent p going away */
        get_task_struct(p);
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
                retval = -ENOMEM;
@@ -6616,7 +6649,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        int retval;
 
        get_online_cpus();
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
 
        retval = -ESRCH;
        p = find_process_by_pid(pid);
@@ -6632,7 +6665,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        task_rq_unlock(rq, &flags);
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        put_online_cpus();
 
        return retval;
@@ -6876,7 +6909,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                return -EINVAL;
 
        retval = -ESRCH;
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;
@@ -6889,13 +6922,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        time_slice = p->sched_class->get_rr_interval(rq, p);
        task_rq_unlock(rq, &flags);
 
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
        return retval;
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6986,6 +7019,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        raw_spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
+       idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7100,7 +7134,23 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        struct rq *rq;
        int ret = 0;
 
+       /*
+        * Since we rely on wake-ups to migrate sleeping tasks, don't change
+        * the ->cpus_allowed mask from under waking tasks, which would be
+        * possible when we change rq->lock in ttwu(), so synchronize against
+        * TASK_WAKING to avoid that.
+        */
+again:
+       while (p->state == TASK_WAKING)
+               cpu_relax();
+
        rq = task_rq_lock(p, &flags);
+
+       if (p->state == TASK_WAKING) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
+
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
@@ -7156,7 +7206,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
        struct rq *rq_dest, *rq_src;
-       int ret = 0, on_rq;
+       int ret = 0;
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
@@ -7172,12 +7222,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                goto fail;
 
-       on_rq = p->se.on_rq;
-       if (on_rq)
+       /*
+        * If we're not on a rq, the next wake-up will ensure we're
+        * placed properly.
+        */
+       if (p->se.on_rq) {
                deactivate_task(rq_src, p, 0);
-
-       set_task_cpu(p, dest_cpu);
-       if (on_rq) {
+               set_task_cpu(p, dest_cpu);
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
@@ -7273,37 +7324,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
        int dest_cpu;
-       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-       /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-                       goto move;
-
-       /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-       if (dest_cpu < nr_cpu_ids)
-               goto move;
-
-       /* No more Mr. Nice Guy. */
-       if (dest_cpu >= nr_cpu_ids) {
-               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-               /*
-                * Don't tell them about moving exiting tasks or
-                * kernel threads (both mm NULL), since they never
-                * leave kernel.
-                */
-               if (p->mm && printk_ratelimit()) {
-                       printk(KERN_INFO "process %d (%s) no "
-                              "longer affine to cpu%d\n",
-                              task_pid_nr(p), p->comm, dead_cpu);
-               }
-       }
+       dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
        /* It can have affinity changed while we were choosing. */
        if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                goto again;
@@ -9366,7 +9390,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
-       spin_lock_init(&rt_rq->rt_runtime_lock);
+       raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->rt_nr_boosted = 0;
@@ -9624,7 +9648,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
        /*
@@ -9668,7 +9692,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-       int nested = preempt_count() & ~PREEMPT_ACTIVE;
+       int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
 
        return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
 }
@@ -9749,13 +9773,13 @@ void normalize_rt_tasks(void)
                        continue;
                }
 
-               spin_lock(&p->pi_lock);
+               raw_spin_lock(&p->pi_lock);
                rq = __task_rq_lock(p);
 
                normalize_task(rq, p);
 
                __task_rq_unlock(rq);
-               spin_unlock(&p->pi_lock);
+               raw_spin_unlock(&p->pi_lock);
        } while_each_thread(g, p);
 
        read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10083,7 +10107,7 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->moved_group)
-               tsk->sched_class->moved_group(tsk);
+               tsk->sched_class->moved_group(tsk, on_rq);
 #endif
 
        if (unlikely(running))
@@ -10305,18 +10329,18 @@ static int tg_set_bandwidth(struct task_group *tg,
        if (err)
                goto unlock;
 
-       spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;
 
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
  unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
@@ -10421,15 +10445,15 @@ static int sched_rt_global_constraints(void)
        if (sysctl_sched_rt_runtime == 0)
                return -EBUSY;
 
-       spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
        return 0;
 }