sched: Remove rq argument to the tracepoints
diff --git a/kernel/sched.c b/kernel/sched.c
index af5fa23..11ac0eb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -492,8 +493,11 @@ struct rq {
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
+       u64 nohz_stamp;
        unsigned char in_nohz_recently;
 #endif
+       unsigned int skip_clock_update;
+
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
@@ -591,6 +595,13 @@ static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+       /*
+        * A queue event has occurred, and we're going to schedule.  In
+        * this case, we can save a useless back to back clock update.
+        */
+       if (test_tsk_need_resched(p))
+               rq->skip_clock_update = 1;
 }
 
 static inline int cpu_of(struct rq *rq)
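
The skip_clock_update flag added above is a one-shot latch: a queue event that is about to force a reschedule marks the rq clock as recent enough, update_rq_clock() then becomes a no-op until put_prev_task() clears the flag again (both of those changes appear in later hunks). A minimal standalone sketch of that latch, using a hypothetical mini_rq type and a fake clock source rather than the kernel's struct rq:

    #include <stdio.h>

    /* hypothetical stand-ins for the rq->clock plumbing */
    struct mini_rq {
            unsigned long long clock;
            unsigned int skip_clock_update;
    };

    static unsigned long long fake_clock(void)
    {
            static unsigned long long now;
            return now += 1000;             /* pretend 1us passes per read */
    }

    static void mini_update_rq_clock(struct mini_rq *rq)
    {
            if (!rq->skip_clock_update)
                    rq->clock = fake_clock();
    }

    int main(void)
    {
            struct mini_rq rq = { 0, 0 };

            mini_update_rq_clock(&rq);      /* normal clock update */
            rq.skip_clock_update = 1;       /* queue event, reschedule pending */
            mini_update_rq_clock(&rq);      /* skipped: saves a back-to-back update */
            printf("clock=%llu\n", rq.clock);
            rq.skip_clock_update = 0;       /* cleared again in put_prev_task() */
            return 0;
    }
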
@@ -602,6 +613,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define rcu_dereference_check_sched_domain(p) \
+       rcu_dereference_check((p), \
+                             rcu_read_lock_sched_held() || \
+                             lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -610,7 +626,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+       for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
@@ -620,7 +636,8 @@ static inline int cpu_of(struct rq *rq)
 
 inline void update_rq_clock(struct rq *rq)
 {
-       rq->clock = sched_clock_cpu(cpu_of(rq));
+       if (!rq->skip_clock_update)
+               rq->clock = sched_clock_cpu(cpu_of(rq));
 }
 
 /*
@@ -898,16 +915,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
- *
- * We need to make an exception for PF_STARTING tasks because the fork
- * path might require task_rq_lock() to work, eg. it can call
- * set_cpus_allowed_ptr() from the cpuset clone_ns code.
+ * Check whether the task is waking; we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
-       return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
+       return unlikely(p->state == TASK_WAKING);
 }
 
 /*
@@ -920,11 +933,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
        struct rq *rq;
 
        for (;;) {
-               while (task_is_waking(p))
-                       cpu_relax();
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_is_waking(p)))
+               if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
@@ -941,12 +952,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        struct rq *rq;
 
        for (;;) {
-               while (task_is_waking(p))
-                       cpu_relax();
                local_irq_save(*flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_is_waking(p)))
+               if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
@@ -1223,6 +1232,17 @@ void wake_up_idle_cpu(int cpu)
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
 }
+
+int nohz_ratelimit(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       u64 diff = rq->clock - rq->nohz_stamp;
+
+       rq->nohz_stamp = rq->clock;
+
+       return diff < (NSEC_PER_SEC / HZ) >> 1;
+}
+
 #endif /* CONFIG_NO_HZ */
 
 static u64 sched_avg_period(void)
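
nohz_ratelimit() above compares the time since the previous call against half a tick period ((NSEC_PER_SEC / HZ) >> 1), so a CPU that was interrupted very recently does not bother entering nohz mode again. A rough standalone sketch of the same arithmetic, with an assumed HZ of 1000 and a plain variable standing in for rq->clock:

    #include <stdbool.h>
    #include <stdio.h>

    #define NSEC_PER_SEC    1000000000ULL
    #define HZ              1000            /* assumed tick rate for the example */

    static unsigned long long clock_ns, nohz_stamp;

    /* true when less than half a tick has passed since the previous call */
    static bool nohz_ratelimited(void)
    {
            unsigned long long diff = clock_ns - nohz_stamp;

            nohz_stamp = clock_ns;
            return diff < (NSEC_PER_SEC / HZ) / 2;
    }

    int main(void)
    {
            clock_ns = 100000;                      /* 0.1ms since the stamp */
            printf("%d\n", nohz_ratelimited());     /* 1: too soon, stay tick-driven */
            clock_ns += 2000000;                    /* 2ms later */
            printf("%d\n", nohz_ratelimited());     /* 0: fine to enter nohz */
            return 0;
    }
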
@@ -1481,7 +1501,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+       struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
        if (!sd)
                return NULL;
@@ -1516,7 +1536,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1765,8 +1785,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
-       update_rq_clock(rq1);
-       update_rq_clock(rq2);
 }
 
 /*
@@ -1797,7 +1815,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
-static void calc_load_account_active(struct rq *this_rq);
+static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
 
@@ -1854,62 +1872,43 @@ static void set_load_weight(struct task_struct *p)
        p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-       s64 diff = sample - *avg;
-       *avg += diff >> 3;
-}
-
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       if (wakeup)
-               p->se.start_runtime = p->se.sum_exec_runtime;
-
+       update_rq_clock(rq);
        sched_info_queued(p);
-       p->sched_class->enqueue_task(rq, p, wakeup, head);
+       p->sched_class->enqueue_task(rq, p, flags);
        p->se.on_rq = 1;
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       if (sleep) {
-               if (p->se.last_wakeup) {
-                       update_avg(&p->se.avg_overlap,
-                               p->se.sum_exec_runtime - p->se.last_wakeup);
-                       p->se.last_wakeup = 0;
-               } else {
-                       update_avg(&p->se.avg_wakeup,
-                               sysctl_sched_wakeup_granularity);
-               }
-       }
-
+       update_rq_clock(rq);
        sched_info_dequeued(p);
-       p->sched_class->dequeue_task(rq, p, sleep);
+       p->sched_class->dequeue_task(rq, p, flags);
        p->se.on_rq = 0;
 }
 
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
 
-       enqueue_task(rq, p, wakeup, false);
+       enqueue_task(rq, p, flags);
        inc_nr_running(rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
-       dequeue_task(rq, p, sleep);
+       dequeue_task(rq, p, flags);
        dec_nr_running(rq);
 }
 
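
The enqueue/dequeue hunks above replace the wakeup/sleep booleans with a flags word, so callers can combine independent hints (such as ENQUEUE_WAKEUP, ENQUEUE_WAKING, ENQUEUE_HEAD, DEQUEUE_SLEEP in later hunks) in a single argument. A small sketch of that calling convention; the numeric flag values are illustrative, not the kernel's definitions:

    #include <stdio.h>

    /* illustrative bit values only */
    #define ENQUEUE_WAKEUP  0x01
    #define ENQUEUE_HEAD    0x02

    static void enqueue_example(int flags)
    {
            if (flags & ENQUEUE_WAKEUP)
                    printf("account a wakeup\n");
            if (flags & ENQUEUE_HEAD)
                    printf("queue at the head, not the tail\n");
    }

    int main(void)
    {
            enqueue_example(0);                             /* plain enqueue */
            enqueue_example(ENQUEUE_WAKEUP);                /* wakeup path */
            enqueue_example(ENQUEUE_WAKEUP | ENQUEUE_HEAD); /* e.g. a priority boost */
            return 0;
    }
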
@@ -2169,7 +2168,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
-               trace_sched_wait_task(rq, p);
+               trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
                ncsw = 0;
@@ -2267,6 +2266,9 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+/*
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
        int dest_cpu;
@@ -2283,12 +2285,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                return dest_cpu;
 
        /* No more Mr. Nice Guy. */
-       if (dest_cpu >= nr_cpu_ids) {
-               rcu_read_lock();
-               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               rcu_read_unlock();
-               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
+       if (unlikely(dest_cpu >= nr_cpu_ids)) {
+               dest_cpu = cpuset_cpus_allowed_fallback(p);
                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
@@ -2305,17 +2303,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Gets called from 3 sites (exec, fork, wakeup), since it is called without
- * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
- * by:
- *
- *  exec:           is unstable, retry loop
- *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
+ * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
 {
-       int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+       int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
 
        /*
         * In order not to call set_task_cpu() on a blocking task we need
@@ -2333,6 +2326,12 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 
        return cpu;
 }
+
+static void update_avg(u64 *avg, u64 sample)
+{
+       s64 diff = sample - *avg;
+       *avg += diff >> 3;
+}
 #endif
 
 /***
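
update_avg(), now moved under CONFIG_SMP, is an exponential moving average with a weight of 1/8: each new sample pulls the average one eighth of the way toward it. A standalone sketch of the same update rule, fed an arbitrary constant sample:

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as update_avg(): avg += (sample - avg) / 8 */
    static void ema_update(uint64_t *avg, uint64_t sample)
    {
            int64_t diff = (int64_t)(sample - *avg);

            *avg += diff >> 3;
    }

    int main(void)
    {
            uint64_t avg = 0;
            int i;

            for (i = 0; i < 40; i++)
                    ema_update(&avg, 8000);                 /* constant 8us sample */
            printf("avg=%llu\n", (unsigned long long)avg);  /* converges toward 8000 */
            return 0;
    }
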
@@ -2354,16 +2353,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
-       struct rq *rq, *orig_rq;
-
-       if (!sched_feat(SYNC_WAKEUPS))
-               wake_flags &= ~WF_SYNC;
+       unsigned long en_flags = ENQUEUE_WAKEUP;
+       struct rq *rq;
 
        this_cpu = get_cpu();
 
        smp_wmb();
-       rq = orig_rq = task_rq_lock(p, &flags);
-       update_rq_clock(rq);
+       rq = task_rq_lock(p, &flags);
        if (!(p->state & state))
                goto out;
 
@@ -2383,28 +2379,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         *
         * First fix up the nr_uninterruptible count:
         */
-       if (task_contributes_to_load(p))
-               rq->nr_uninterruptible--;
+       if (task_contributes_to_load(p)) {
+               if (likely(cpu_online(orig_cpu)))
+                       rq->nr_uninterruptible--;
+               else
+                       this_rq()->nr_uninterruptible--;
+       }
        p->state = TASK_WAKING;
 
-       if (p->sched_class->task_waking)
+       if (p->sched_class->task_waking) {
                p->sched_class->task_waking(rq, p);
+               en_flags |= ENQUEUE_WAKING;
+       }
 
-       __task_rq_unlock(rq);
-
-       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (cpu != orig_cpu) {
-               /*
-                * Since we migrate the task without holding any rq->lock,
-                * we need to be careful with task_rq_lock(), since that
-                * might end up locking an invalid rq.
-                */
+       cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+       if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-       }
+       __task_rq_unlock(rq);
 
        rq = cpu_rq(cpu);
        raw_spin_lock(&rq->lock);
-       update_rq_clock(rq);
 
        /*
         * We migrated the task without holding either rq->lock, however
@@ -2432,36 +2426,20 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-       schedstat_inc(p, se.nr_wakeups);
+       schedstat_inc(p, se.statistics.nr_wakeups);
        if (wake_flags & WF_SYNC)
-               schedstat_inc(p, se.nr_wakeups_sync);
+               schedstat_inc(p, se.statistics.nr_wakeups_sync);
        if (orig_cpu != cpu)
-               schedstat_inc(p, se.nr_wakeups_migrate);
+               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
        if (cpu == this_cpu)
-               schedstat_inc(p, se.nr_wakeups_local);
+               schedstat_inc(p, se.statistics.nr_wakeups_local);
        else
-               schedstat_inc(p, se.nr_wakeups_remote);
-       activate_task(rq, p, 1);
+               schedstat_inc(p, se.statistics.nr_wakeups_remote);
+       activate_task(rq, p, en_flags);
        success = 1;
 
-       /*
-        * Only attribute actual wakeups done by this task.
-        */
-       if (!in_interrupt()) {
-               struct sched_entity *se = &current->se;
-               u64 sample = se->sum_exec_runtime;
-
-               if (se->last_wakeup)
-                       sample -= se->last_wakeup;
-               else
-                       sample -= se->start_runtime;
-               update_avg(&se->avg_wakeup, sample);
-
-               se->last_wakeup = se->sum_exec_runtime;
-       }
-
 out_running:
-       trace_sched_wakeup(rq, p, success);
+       trace_sched_wakeup(p, success);
        check_preempt_curr(rq, p, wake_flags);
 
        p->state = TASK_RUNNING;
@@ -2521,42 +2499,9 @@ static void __sched_fork(struct task_struct *p)
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
-       p->se.last_wakeup               = 0;
-       p->se.avg_overlap               = 0;
-       p->se.start_runtime             = 0;
-       p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
-       p->se.wait_start                        = 0;
-       p->se.wait_max                          = 0;
-       p->se.wait_count                        = 0;
-       p->se.wait_sum                          = 0;
-
-       p->se.sleep_start                       = 0;
-       p->se.sleep_max                         = 0;
-       p->se.sum_sleep_runtime                 = 0;
-
-       p->se.block_start                       = 0;
-       p->se.block_max                         = 0;
-       p->se.exec_max                          = 0;
-       p->se.slice_max                         = 0;
-
-       p->se.nr_migrations_cold                = 0;
-       p->se.nr_failed_migrations_affine       = 0;
-       p->se.nr_failed_migrations_running      = 0;
-       p->se.nr_failed_migrations_hot          = 0;
-       p->se.nr_forced_migrations              = 0;
-
-       p->se.nr_wakeups                        = 0;
-       p->se.nr_wakeups_sync                   = 0;
-       p->se.nr_wakeups_migrate                = 0;
-       p->se.nr_wakeups_local                  = 0;
-       p->se.nr_wakeups_remote                 = 0;
-       p->se.nr_wakeups_affine                 = 0;
-       p->se.nr_wakeups_affine_attempts        = 0;
-       p->se.nr_wakeups_passive                = 0;
-       p->se.nr_wakeups_idle                   = 0;
-
+       memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
        INIT_LIST_HEAD(&p->rt.run_list);
@@ -2577,11 +2522,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
        __sched_fork(p);
        /*
-        * We mark the process as waking here. This guarantees that
+        * We mark the process as running here. This guarantees that
         * nobody will actually run it, and a signal or other external
         * event cannot wake it up and insert it on the runqueue either.
         */
-       p->state = TASK_WAKING;
+       p->state = TASK_RUNNING;
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2645,34 +2590,30 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
        unsigned long flags;
        struct rq *rq;
-       int cpu = get_cpu();
+       int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
+       rq = task_rq_lock(p, &flags);
+       p->state = TASK_WAKING;
+
        /*
         * Fork balancing, do it here and not earlier because:
         *  - cpus_allowed can change in the fork path
         *  - any previously selected cpu might disappear through hotplug
         *
-        * We still have TASK_WAKING but PF_STARTING is gone now, meaning
-        * ->cpus_allowed is stable, we have preemption disabled, meaning
-        * cpu_online_mask is stable.
+        * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+        * without people poking at ->cpus_allowed.
         */
-       cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+       cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
        set_task_cpu(p, cpu);
-#endif
 
-       /*
-        * Since the task is not on the rq and we still have TASK_WAKING set
-        * nobody else will migrate this task.
-        */
-       rq = cpu_rq(cpu);
-       raw_spin_lock_irqsave(&rq->lock, flags);
-
-       BUG_ON(p->state != TASK_WAKING);
        p->state = TASK_RUNNING;
-       update_rq_clock(rq);
+       task_rq_unlock(rq, &flags);
+#endif
+
+       rq = task_rq_lock(p, &flags);
        activate_task(rq, p, 0);
-       trace_sched_wakeup_new(rq, p, 1);
+       trace_sched_wakeup_new(p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
@@ -2798,7 +2739,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
-       perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+       perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
 
        fire_sched_in_preempt_notifiers(current);
@@ -2886,7 +2833,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
-       trace_sched_switch(rq, prev, next);
+       trace_sched_switch(prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -3003,6 +2950,61 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+static long calc_load_fold_active(struct rq *this_rq)
+{
+       long nr_active, delta = 0;
+
+       nr_active = this_rq->nr_running;
+       nr_active += (long) this_rq->nr_uninterruptible;
+
+       if (nr_active != this_rq->calc_load_active) {
+               delta = nr_active - this_rq->calc_load_active;
+               this_rq->calc_load_active = nr_active;
+       }
+
+       return delta;
+}
+
+#ifdef CONFIG_NO_HZ
+/*
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_tasks_idle;
+
+static void calc_load_account_idle(struct rq *this_rq)
+{
+       long delta;
+
+       delta = calc_load_fold_active(this_rq);
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks_idle);
+}
+
+static long calc_load_fold_idle(void)
+{
+       long delta = 0;
+
+       /*
+        * It's got a race; we don't care...
+        */
+       if (atomic_long_read(&calc_load_tasks_idle))
+               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+
+       return delta;
+}
+#else
+static void calc_load_account_idle(struct rq *this_rq)
+{
+}
+
+static inline long calc_load_fold_idle(void)
+{
+       return 0;
+}
+#endif
+
 /**
  * get_avenrun - get the load average array
  * @loads:     pointer to dest load array
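
The NO_HZ branch above lets an idling CPU park its nr_running/nr_uninterruptible delta in calc_load_tasks_idle instead of waiting for its own LOAD_FREQ window; whichever CPU next runs calc_load_account_active() folds the parked delta into the global calc_load_tasks. A rough single-threaded sketch of that fold, with plain longs instead of atomics and made-up task counts:

    #include <stdio.h>

    static long calc_load_tasks = 3;        /* 3 tasks already accounted globally */
    static long calc_load_tasks_idle;       /* pending delta parked by idle CPUs */

    static void cpu_goes_idle(long nr_active, long *calc_load_active)
    {
            long delta = nr_active - *calc_load_active;

            *calc_load_active = nr_active;
            calc_load_tasks_idle += delta;  /* defer the fold (atomic add in the kernel) */
    }

    static void load_freq_update(void)
    {
            long delta = calc_load_tasks_idle;

            calc_load_tasks_idle = 0;       /* atomic_long_xchg() in the kernel */
            calc_load_tasks += delta;       /* fold the idle CPUs' contribution */
    }

    int main(void)
    {
            long cpu1_active = 3;

            cpu_goes_idle(0, &cpu1_active); /* CPU1 drops its 3 tasks and goes idle */
            load_freq_update();             /* another CPU picks up the -3 */
            printf("calc_load_tasks=%ld\n", calc_load_tasks);       /* 0 */
            return 0;
    }
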
@@ -3049,20 +3051,22 @@ void calc_global_load(void)
 }
 
 /*
- * Either called from update_cpu_load() or from a cpu going idle
+ * Called from update_cpu_load() to periodically update this CPU's
+ * active count.
  */
 static void calc_load_account_active(struct rq *this_rq)
 {
-       long nr_active, delta;
+       long delta;
 
-       nr_active = this_rq->nr_running;
-       nr_active += (long) this_rq->nr_uninterruptible;
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
 
-       if (nr_active != this_rq->calc_load_active) {
-               delta = nr_active - this_rq->calc_load_active;
-               this_rq->calc_load_active = nr_active;
+       delta  = calc_load_fold_active(this_rq);
+       delta += calc_load_fold_idle();
+       if (delta)
                atomic_long_add(delta, &calc_load_tasks);
-       }
+
+       this_rq->calc_load_update += LOAD_FREQ;
 }
 
 /*
@@ -3094,10 +3098,7 @@ static void update_cpu_load(struct rq *this_rq)
                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
        }
 
-       if (time_after_eq(jiffies, this_rq->calc_load_update)) {
-               this_rq->calc_load_update += LOAD_FREQ;
-               calc_load_account_active(this_rq);
-       }
+       calc_load_account_active(this_rq);
 }
 
 #ifdef CONFIG_SMP
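
calc_load_account_active() now rate-limits itself with the usual jiffies window pattern: do nothing until calc_load_update has passed, then advance the deadline by LOAD_FREQ. A small sketch of that once-per-window pattern, with an assumed HZ of 100 so LOAD_FREQ is roughly 500 jiffies:

    #include <stdio.h>

    #define LOAD_FREQ       500             /* assumed: ~5 seconds at HZ=100 */

    static unsigned long jiffies;
    static unsigned long calc_load_update = LOAD_FREQ;

    /* act at most once per LOAD_FREQ window */
    static int due_for_update(void)
    {
            if ((long)(jiffies - calc_load_update) < 0)     /* time_before() */
                    return 0;
            calc_load_update += LOAD_FREQ;
            return 1;
    }

    int main(void)
    {
            for (jiffies = 0; jiffies <= 1200; jiffies += 300)
                    printf("jiffies=%lu due=%d\n", jiffies, due_for_update());
            return 0;
    }
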
@@ -3110,32 +3111,21 @@ void sched_exec(void)
 {
        struct task_struct *p = current;
        struct migration_req req;
-       int dest_cpu, this_cpu;
        unsigned long flags;
        struct rq *rq;
-
-again:
-       this_cpu = get_cpu();
-       dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
-       if (dest_cpu == this_cpu) {
-               put_cpu();
-               return;
-       }
+       int dest_cpu;
 
        rq = task_rq_lock(p, &flags);
-       put_cpu();
+       dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+       if (dest_cpu == smp_processor_id())
+               goto unlock;
 
        /*
         * select_task_rq() can race against ->cpus_allowed
         */
-       if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-           || unlikely(!cpu_active(dest_cpu))) {
-               task_rq_unlock(rq, &flags);
-               goto again;
-       }
-
-       /* force the process onto the specified CPU */
-       if (migrate_task(p, dest_cpu, &req)) {
+       if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+           likely(cpu_active(dest_cpu)) &&
+           migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
                struct task_struct *mt = rq->migration_thread;
 
@@ -3147,6 +3137,7 @@ again:
 
                return;
        }
+unlock:
        task_rq_unlock(rq, &flags);
 }
 
@@ -3504,7 +3495,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);
 
-       perf_event_task_tick(curr, cpu);
+       perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@ -3618,23 +3609,9 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-       if (prev->state == TASK_RUNNING) {
-               u64 runtime = prev->se.sum_exec_runtime;
-
-               runtime -= prev->se.prev_sum_exec_runtime;
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
-               /*
-                * In order to avoid avg_overlap growing stale when we are
-                * indeed overlapping and hence not getting put to sleep, grow
-                * the avg_overlap on preemption.
-                *
-                * We use the average preemption runtime because that
-                * correlates to the amount of cache footprint a task can
-                * build up.
-                */
-               update_avg(&prev->se.avg_overlap, runtime);
-       }
+       if (prev->se.on_rq)
+               update_rq_clock(rq);
+       rq->skip_clock_update = 0;
        prev->sched_class->put_prev_task(rq, prev);
 }
 
@@ -3697,14 +3674,13 @@ need_resched_nonpreemptible:
                hrtick_clear(rq);
 
        raw_spin_lock_irq(&rq->lock);
-       update_rq_clock(rq);
        clear_tsk_need_resched(prev);
 
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev)))
                        prev->state = TASK_RUNNING;
                else
-                       deactivate_task(rq, prev, 1);
+                       deactivate_task(rq, prev, DEQUEUE_SLEEP);
                switch_count = &prev->nvcsw;
        }
 
@@ -3718,7 +3694,7 @@ need_resched_nonpreemptible:
 
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
-               perf_event_task_sched_out(prev, next, cpu);
+               perf_event_task_sched_out(prev, next);
 
                rq->nr_switches++;
                rq->curr = next;
@@ -4249,14 +4225,14 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        unsigned long flags;
        int oldprio, on_rq, running;
        struct rq *rq;
-       const struct sched_class *prev_class = p->sched_class;
+       const struct sched_class *prev_class;
 
        BUG_ON(prio < 0 || prio > MAX_PRIO);
 
        rq = task_rq_lock(p, &flags);
-       update_rq_clock(rq);
 
        oldprio = p->prio;
+       prev_class = p->sched_class;
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
        if (on_rq)
@@ -4274,7 +4250,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               enqueue_task(rq, p, 0, oldprio < prio);
+               enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
@@ -4296,7 +4272,6 @@ void set_user_nice(struct task_struct *p, long nice)
         * the task might be in the middle of scheduling on another CPU.
         */
        rq = task_rq_lock(p, &flags);
-       update_rq_clock(rq);
        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
@@ -4318,7 +4293,7 @@ void set_user_nice(struct task_struct *p, long nice)
        delta = p->prio - old_prio;
 
        if (on_rq) {
-               enqueue_task(rq, p, 0, false);
+               enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -4341,7 +4316,7 @@ int can_nice(const struct task_struct *p, const int nice)
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = 20 - nice;
 
-       return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+       return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
 }
 
@@ -4476,7 +4451,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 {
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
-       const struct sched_class *prev_class = p->sched_class;
+       const struct sched_class *prev_class;
        struct rq *rq;
        int reset_on_fork;
 
@@ -4518,7 +4493,7 @@ recheck:
 
                        if (!lock_task_sighand(p, &flags))
                                return -ESRCH;
-                       rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+                       rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
                        unlock_task_sighand(p, &flags);
 
                        /* can't set/change the rt policy */
@@ -4579,7 +4554,6 @@ recheck:
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
-       update_rq_clock(rq);
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
        if (on_rq)
@@ -4590,6 +4564,7 @@ recheck:
        p->sched_reset_on_fork = reset_on_fork;
 
        oldprio = p->prio;
+       prev_class = p->sched_class;
        __setscheduler(rq, p, policy, param->sched_priority);
 
        if (running)
@@ -4889,7 +4864,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
        int ret;
        cpumask_var_t mask;
 
-       if (len < cpumask_size())
+       if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+               return -EINVAL;
+       if (len & (sizeof(unsigned long)-1))
                return -EINVAL;
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4897,10 +4874,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
-               if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+               size_t retlen = min_t(size_t, len, cpumask_size());
+
+               if (copy_to_user(user_mask_ptr, mask, retlen))
                        ret = -EFAULT;
                else
-                       ret = cpumask_size();
+                       ret = retlen;
        }
        free_cpumask_var(mask);
 
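
With the new length checks, sched_getaffinity() accepts any user buffer that is long-aligned and covers nr_cpu_ids bits, and copies back at most the smaller of the user length and the kernel's cpumask_size(). A userspace-style sketch of just the validation, with a made-up nr_cpu_ids:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    #define BITS_PER_BYTE   8
    static const unsigned int nr_cpu_ids = 16;     /* example value */

    /* mirrors the checks the syscall now applies to the user-supplied length */
    static int check_affinity_len(size_t len)
    {
            if (len * BITS_PER_BYTE < nr_cpu_ids)
                    return -EINVAL;         /* too small to hold every possible CPU */
            if (len & (sizeof(unsigned long) - 1))
                    return -EINVAL;         /* must be a multiple of an unsigned long */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_affinity_len(1));  /* -EINVAL: fewer than 16 bits */
            printf("%d\n", check_affinity_len(9));  /* -EINVAL: not long-aligned */
            printf("%d\n", check_affinity_len(8));  /* 0: 64 bits and aligned */
            return 0;
    }
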
@@ -5340,7 +5319,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        struct rq *rq;
        int ret = 0;
 
+       /*
+        * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task()
+        * can drop the rq->lock and still rely on ->cpus_allowed.
+        */
+again:
+       while (task_is_waking(p))
+               cpu_relax();
        rq = task_rq_lock(p, &flags);
+       if (task_is_waking(p)) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
 
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
@@ -5370,7 +5360,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(rq->migration_thread);
+               wake_up_process(mt);
                put_task_struct(mt);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
@@ -5498,30 +5488,29 @@ static int migration_thread(void *data)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
-{
-       int ret;
-
-       local_irq_disable();
-       ret = __migrate_task(p, src_cpu, dest_cpu);
-       local_irq_enable();
-       return ret;
-}
-
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
  */
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-       int dest_cpu;
+       struct rq *rq = cpu_rq(dead_cpu);
+       int needs_cpu, uninitialized_var(dest_cpu);
+       unsigned long flags;
 
-again:
-       dest_cpu = select_fallback_rq(dead_cpu, p);
+       local_irq_save(flags);
 
-       /* It can have affinity changed while we were choosing. */
-       if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
-               goto again;
+       raw_spin_lock(&rq->lock);
+       needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
+       if (needs_cpu)
+               dest_cpu = select_fallback_rq(dead_cpu, p);
+       raw_spin_unlock(&rq->lock);
+       /*
+        * It can only fail if we race with set_cpus_allowed(),
+        * in which case the racer should migrate the task anyway.
+        */
+       if (needs_cpu)
+               __migrate_task(p, dead_cpu, dest_cpu);
+       local_irq_restore(flags);
 }
 
 /*
@@ -5585,7 +5574,6 @@ void sched_idle_next(void)
 
        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-       update_rq_clock(rq);
        activate_task(rq, p, 0);
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5640,7 +5628,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
        for ( ; ; ) {
                if (!rq->nr_running)
                        break;
-               update_rq_clock(rq);
                next = pick_next_task(rq);
                if (!next)
                        break;
@@ -5916,7 +5903,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
@@ -5924,13 +5910,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
                raw_spin_lock_irq(&rq->lock);
-               update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
                raw_spin_unlock_irq(&rq->lock);
-               cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
                calc_global_load_remove(rq);
@@ -6287,6 +6271,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
 
+       for (tmp = sd; tmp; tmp = tmp->parent)
+               tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
        /* Remove the sched domains which do not contribute to scheduling. */
        for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
@@ -7393,11 +7380,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+                                          struct sysdev_class_attribute *attr,
                                           char *page)
 {
        return sprintf(page, "%u\n", sched_mc_power_savings);
 }
 static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
+                                           struct sysdev_class_attribute *attr,
                                            const char *buf, size_t count)
 {
        return sched_power_savings_store(buf, count, 0);
@@ -7409,11 +7398,13 @@ static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
 
 #ifdef CONFIG_SCHED_SMT
 static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+                                           struct sysdev_class_attribute *attr,
                                            char *page)
 {
        return sprintf(page, "%u\n", sched_smt_power_savings);
 }
 static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
+                                            struct sysdev_class_attribute *attr,
                                             const char *buf, size_t count)
 {
        return sched_power_savings_store(buf, count, 1);
@@ -7870,7 +7861,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 {
        int on_rq;
 
-       update_rq_clock(rq);
        on_rq = p->se.on_rq;
        if (on_rq)
                deactivate_task(rq, p, 0);
@@ -7897,9 +7887,9 @@ void normalize_rt_tasks(void)
 
                p->se.exec_start                = 0;
 #ifdef CONFIG_SCHEDSTATS
-               p->se.wait_start                = 0;
-               p->se.sleep_start               = 0;
-               p->se.block_start               = 0;
+               p->se.statistics.wait_start     = 0;
+               p->se.statistics.sleep_start    = 0;
+               p->se.statistics.block_start    = 0;
 #endif
 
                if (!rt_task(p)) {
@@ -8232,8 +8222,6 @@ void sched_move_task(struct task_struct *tsk)
 
        rq = task_rq_lock(tsk, &flags);
 
-       update_rq_clock(rq);
-
        running = task_current(rq, tsk);
        on_rq = tsk->se.on_rq;
 
@@ -8252,7 +8240,7 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
        if (on_rq)
-               enqueue_task(rq, tsk, 0, false);
+               enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, &flags);
 }
@@ -8800,7 +8788,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
-       u64 *cpuusage;
+       u64 __percpu *cpuusage;
        struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
        struct cpuacct *parent;
 };