diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 61d1988..bac1061 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -4,20 +4,18 @@
  */
 
 #ifdef CONFIG_SMP
-static cpumask_t rt_overload_mask;
-static atomic_t rto_count;
-static inline int rt_overloaded(void)
-{
-       return atomic_read(&rto_count);
-}
-static inline cpumask_t *rt_overload(void)
+
+static inline int rt_overloaded(struct rq *rq)
 {
-       return &rt_overload_mask;
+       return atomic_read(&rq->rd->rto_count);
 }
+
 static inline void rt_set_overload(struct rq *rq)
 {
-       rq->rt.overloaded = 1;
-       cpu_set(rq->cpu, rt_overload_mask);
+       if (!rq->online)
+               return;
+
+       cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
@@ -26,25 +24,487 @@ static inline void rt_set_overload(struct rq *rq)
         * updated yet.
         */
        wmb();
-       atomic_inc(&rto_count);
+       atomic_inc(&rq->rd->rto_count);
 }
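
(The wmb() above orders "set mask bit" before "increment count", so any
reader that first observes a nonzero rto_count is guaranteed to see the
corresponding mask bit. A minimal standalone C11 model of that pairing
follows; the names, and the use of release/acquire atomics in place of
wmb() and the kernel's atomic_* ops, are illustrative assumptions, not
kernel code.)

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long rto_mask;  /* models rd->rto_mask  */
    static atomic_int rto_count;            /* models rd->rto_count */

    static void set_overload(int cpu)
    {
            /* Publish the mask bit first... */
            atomic_fetch_or_explicit(&rto_mask, 1UL << cpu,
                                     memory_order_relaxed);
            /* ...then bump the count; this release plays the role of
             * the wmb() in rt_set_overload(). */
            atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
    }

    static bool reader_sees_cpu(int cpu)
    {
            /* Readers test the count before scanning the mask; the
             * acquire pairs with the release above. */
            if (atomic_load_explicit(&rto_count, memory_order_acquire) == 0)
                    return false;
            return atomic_load_explicit(&rto_mask,
                                        memory_order_relaxed) & (1UL << cpu);
    }

    int main(void)
    {
            set_overload(1);
            return reader_sees_cpu(1) ? 0 : 1;
    }
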
+
 static inline void rt_clear_overload(struct rq *rq)
 {
+       if (!rq->online)
+               return;
+
        /* the order here really doesn't matter */
-       atomic_dec(&rto_count);
-       cpu_clear(rq->cpu, rt_overload_mask);
-       rq->rt.overloaded = 0;
+       atomic_dec(&rq->rd->rto_count);
+       cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
 {
-       if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
-               rt_set_overload(rq);
-       else
+       if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
+               if (!rq->rt.overloaded) {
+                       rt_set_overload(rq);
+                       rq->rt.overloaded = 1;
+               }
+       } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
+               rq->rt.overloaded = 0;
+       }
 }
 #endif /* CONFIG_SMP */
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+       return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline int on_rt_rq(struct sched_rt_entity *rt_se)
+{
+       return !list_empty(&rt_se->run_list);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
+{
+       if (!rt_rq->tg)
+               return RUNTIME_INF;
+
+       return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
+}
+
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+       list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       return rt_se->rt_rq;
+}
+
+#define for_each_sched_rt_entity(rt_se) \
+       for (; rt_se; rt_se = rt_se->parent)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+       return rt_se->my_q;
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
+
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
+{
+       struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+       struct sched_rt_entity *rt_se = rt_rq->rt_se;
+
+       if (rt_rq->rt_nr_running) {
+               if (rt_se && !on_rt_rq(rt_se))
+                       enqueue_rt_entity(rt_se);
+               if (rt_rq->highest_prio < curr->prio)
+                       resched_task(curr);
+       }
+}
+
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
+{
+       struct sched_rt_entity *rt_se = rt_rq->rt_se;
+
+       if (rt_se && on_rt_rq(rt_se))
+               dequeue_rt_entity(rt_se);
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = group_rt_rq(rt_se);
+       struct task_struct *p;
+
+       if (rt_rq)
+               return !!rt_rq->rt_nr_boosted;
+
+       p = rt_task_of(rt_se);
+       return p->prio != p->normal_prio;
+}
+
+#ifdef CONFIG_SMP
+static inline const struct cpumask *sched_rt_period_mask(void)
+{
+       return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline const struct cpumask *sched_rt_period_mask(void)
+{
+       return cpu_online_mask;
+}
+#endif
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+       return &rt_rq->tg->rt_bandwidth;
+}
+
+#else /* !CONFIG_RT_GROUP_SCHED */
+
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(def_rt_bandwidth.rt_period);
+}
+
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+       for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       struct task_struct *p = rt_task_of(rt_se);
+       struct rq *rq = task_rq(p);
+
+       return &rq->rt;
+}
+
+#define for_each_sched_rt_entity(rt_se) \
+       for (; rt_se; rt_se = NULL)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+       return NULL;
+}
+
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
+{
+       if (rt_rq->rt_nr_running)
+               resched_task(rq_of_rt_rq(rt_rq)->curr);
+}
+
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
+{
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
+}
+
+static inline const struct cpumask *sched_rt_period_mask(void)
+{
+       return cpu_online_mask;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return &cpu_rq(cpu)->rt;
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+       return &def_rt_bandwidth;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+#ifdef CONFIG_SMP
+/*
+ * We ran out of runtime; see if we can borrow some from our neighbours.
+ */
+static int do_balance_runtime(struct rt_rq *rt_rq)
+{
+       struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+       int i, weight, more = 0;
+       u64 rt_period;
+
+       weight = cpumask_weight(rd->span);
+
+       spin_lock(&rt_b->rt_runtime_lock);
+       rt_period = ktime_to_ns(rt_b->rt_period);
+       for_each_cpu(i, rd->span) {
+               struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+               s64 diff;
+
+               if (iter == rt_rq)
+                       continue;
+
+               spin_lock(&iter->rt_runtime_lock);
+               /*
+                * Either all rqs have inf runtime and there's nothing to steal
+                * or __disable_runtime() below sets a specific rq to inf to
+                * indicate it's been disabled and disallow stealing.
+                */
+               if (iter->rt_runtime == RUNTIME_INF)
+                       goto next;
+
+               /*
+                * From runqueues with spare time, take 1/n part of their
+                * spare time, but no more than our period.
+                */
+               diff = iter->rt_runtime - iter->rt_time;
+               if (diff > 0) {
+                       diff = div_u64((u64)diff, weight);
+                       if (rt_rq->rt_runtime + diff > rt_period)
+                               diff = rt_period - rt_rq->rt_runtime;
+                       iter->rt_runtime -= diff;
+                       rt_rq->rt_runtime += diff;
+                       more = 1;
+                       if (rt_rq->rt_runtime == rt_period) {
+                               spin_unlock(&iter->rt_runtime_lock);
+                               break;
+                       }
+               }
+next:
+               spin_unlock(&iter->rt_runtime_lock);
+       }
+       spin_unlock(&rt_b->rt_runtime_lock);
+
+       return more;
+}
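
The borrowing rule above - take 1/n of each neighbour's spare time, but
never exceed one full period - is easy to check with concrete numbers.
A standalone sketch of just that arithmetic (fixed arrays, no locking;
all values and names are illustrative, and the divisor is simply the
CPU count, as cpumask_weight(rd->span) would yield):

    #include <stdint.h>
    #include <stdio.h>

    #define NCPU 4

    int main(void)
    {
            /* Per-CPU budget and consumption so far, in ns. */
            int64_t runtime[NCPU] = { 950000, 950000, 950000, 100000 };
            int64_t rt_time[NCPU] = { 100000, 200000, 950000, 100000 };
            int64_t period = 1000000;
            int self = 3;           /* the CPU that ran out of runtime */

            for (int i = 0; i < NCPU; i++) {
                    if (i == self)
                            continue;
                    int64_t diff = runtime[i] - rt_time[i];
                    if (diff <= 0)
                            continue;
                    diff /= NCPU;   /* take 1/n of the spare time */
                    if (runtime[self] + diff > period)
                            diff = period - runtime[self];
                    runtime[i] -= diff;
                    runtime[self] += diff;
                    printf("borrowed %lld ns from cpu%d, now %lld ns\n",
                           (long long)diff, i, (long long)runtime[self]);
                    if (runtime[self] == period)
                            break;
            }
            return 0;
    }
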
+
+/*
+ * Ensure this RQ takes back all the runtime it lent to its neighbours.
+ */
+static void __disable_runtime(struct rq *rq)
+{
+       struct root_domain *rd = rq->rd;
+       struct rt_rq *rt_rq;
+
+       if (unlikely(!scheduler_running))
+               return;
+
+       for_each_leaf_rt_rq(rt_rq, rq) {
+               struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+               s64 want;
+               int i;
+
+               spin_lock(&rt_b->rt_runtime_lock);
+               spin_lock(&rt_rq->rt_runtime_lock);
+               /*
+                * Either we're all inf and nobody needs to borrow, or we're
+                * already disabled and thus have nothing to do, or we have
+                * exactly the right amount of runtime to take out.
+                */
+               if (rt_rq->rt_runtime == RUNTIME_INF ||
+                               rt_rq->rt_runtime == rt_b->rt_runtime)
+                       goto balanced;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+
+               /*
+                * Calculate the difference between what we started out with
+                * and what we currently have; that's the amount of runtime
+                * we lent out and now have to reclaim.
+                */
+               want = rt_b->rt_runtime - rt_rq->rt_runtime;
+
+               /*
+                * Greedy reclaim, take back as much as we can.
+                */
+               for_each_cpu(i, rd->span) {
+                       struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+                       s64 diff;
+
+                       /*
+                        * Can't reclaim from ourselves or disabled runqueues.
+                        */
+                       if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
+                               continue;
+
+                       spin_lock(&iter->rt_runtime_lock);
+                       if (want > 0) {
+                               diff = min_t(s64, iter->rt_runtime, want);
+                               iter->rt_runtime -= diff;
+                               want -= diff;
+                       } else {
+                               iter->rt_runtime -= want;
+                               want -= want;
+                       }
+                       spin_unlock(&iter->rt_runtime_lock);
+
+                       if (!want)
+                               break;
+               }
+
+               spin_lock(&rt_rq->rt_runtime_lock);
+               /*
+                * We cannot be left wanting - that would mean some runtime
+                * leaked out of the system.
+                */
+               BUG_ON(want);
+balanced:
+               /*
+                * Disable all the borrow logic by pretending we have inf
+                * runtime - in which case borrowing doesn't make sense.
+                */
+               rt_rq->rt_runtime = RUNTIME_INF;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               spin_unlock(&rt_b->rt_runtime_lock);
+       }
+}
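
The reclaim direction works the same way in reverse: 'want' is exactly
what this runqueue lent out, and it is pulled back greedily from any
neighbour that still holds runtime. A standalone sketch with
illustrative numbers (no locking, no RUNTIME_INF cases):

    #include <stdint.h>
    #include <stdio.h>

    #define NCPU 4

    int main(void)
    {
            /* CPU 0 originally owned 950000 ns but lent some out. */
            int64_t runtime[NCPU] = { 500000, 650000, 1100000, 950000 };
            int64_t own = 950000;   /* rt_b->rt_runtime */
            int self = 0;
            int64_t want = own - runtime[self];     /* 450000 ns lent */

            for (int i = 0; i < NCPU && want > 0; i++) {
                    if (i == self)
                            continue;
                    int64_t diff = runtime[i] < want ? runtime[i] : want;
                    runtime[i] -= diff;
                    want -= diff;
            }
            /* want must reach zero here, or runtime leaked out of the
             * system - the BUG_ON(want) above. */
            printf("remaining want: %lld\n", (long long)want);
            return 0;
    }
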
+
+static void disable_runtime(struct rq *rq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       __disable_runtime(rq);
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void __enable_runtime(struct rq *rq)
+{
+       struct rt_rq *rt_rq;
+
+       if (unlikely(!scheduler_running))
+               return;
+
+       /*
+        * Reset each runqueue's bandwidth settings
+        */
+       for_each_leaf_rt_rq(rt_rq, rq) {
+               struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+               spin_lock(&rt_b->rt_runtime_lock);
+               spin_lock(&rt_rq->rt_runtime_lock);
+               rt_rq->rt_runtime = rt_b->rt_runtime;
+               rt_rq->rt_time = 0;
+               rt_rq->rt_throttled = 0;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               spin_unlock(&rt_b->rt_runtime_lock);
+       }
+}
+
+static void enable_runtime(struct rq *rq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       __enable_runtime(rq);
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+       int more = 0;
+
+       if (rt_rq->rt_time > rt_rq->rt_runtime) {
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               more = do_balance_runtime(rt_rq);
+               spin_lock(&rt_rq->rt_runtime_lock);
+       }
+
+       return more;
+}
+#else /* !CONFIG_SMP */
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+       return 0;
+}
+#endif /* CONFIG_SMP */
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+       int i, idle = 1;
+       const struct cpumask *span;
+
+       if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+               return 1;
+
+       span = sched_rt_period_mask();
+       for_each_cpu(i, span) {
+               int enqueue = 0;
+               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               spin_lock(&rq->lock);
+               if (rt_rq->rt_time) {
+                       u64 runtime;
+
+                       spin_lock(&rt_rq->rt_runtime_lock);
+                       if (rt_rq->rt_throttled)
+                               balance_runtime(rt_rq);
+                       runtime = rt_rq->rt_runtime;
+                       rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+                               rt_rq->rt_throttled = 0;
+                               enqueue = 1;
+                       }
+                       if (rt_rq->rt_time || rt_rq->rt_nr_running)
+                               idle = 0;
+                       spin_unlock(&rt_rq->rt_runtime_lock);
+               } else if (rt_rq->rt_nr_running)
+                       idle = 0;
+
+               if (enqueue)
+                       sched_rt_rq_enqueue(rt_rq);
+               spin_unlock(&rq->lock);
+       }
+
+       return idle;
+}
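
The replenishment step 'rt_time -= min(rt_time, overrun * runtime)'
forgives up to one full budget per elapsed period, so a runqueue that
stayed throttled across several timer periods is caught up in one go.
Worked numbers, as an illustrative standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_u64(uint64_t a, uint64_t b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* 0.95 ms budget; the rq was throttled with 2.1 ms consumed
             * and the timer fires having missed two periods (overrun=2). */
            uint64_t runtime = 950000, rt_time = 2100000, overrun = 2;

            rt_time -= min_u64(rt_time, overrun * runtime);

            /* 2100000 - 1900000 = 200000 < runtime, so the runqueue is
             * unthrottled and its entities get enqueued again. */
            printf("rt_time after replenish: %llu\n",
                   (unsigned long long)rt_time);
            return 0;
    }
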
+
+static inline int rt_se_prio(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_RT_GROUP_SCHED
+       struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+       if (rt_rq)
+               return rt_rq->highest_prio;
+#endif
+
+       return rt_task_of(rt_se)->prio;
+}
+
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
+{
+       u64 runtime = sched_rt_runtime(rt_rq);
+
+       if (rt_rq->rt_throttled)
+               return rt_rq_throttled(rt_rq);
+
+       if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+               return 0;
+
+       balance_runtime(rt_rq);
+       runtime = sched_rt_runtime(rt_rq);
+       if (runtime == RUNTIME_INF)
+               return 0;
+
+       if (rt_rq->rt_time > runtime) {
+               rt_rq->rt_throttled = 1;
+               if (rt_rq_throttled(rt_rq)) {
+                       sched_rt_rq_dequeue(rt_rq);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
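
Stripped of locking, PI boosting and the balancing call, the throttle
test itself reduces to three comparisons. A distilled, illustrative
version (not the kernel's, which also handles the group hierarchy):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RUNTIME_INF UINT64_MAX

    static bool runtime_exceeded(uint64_t rt_time, uint64_t runtime,
                                 uint64_t period)
    {
            if (runtime == RUNTIME_INF)
                    return false;
            if (runtime >= period)  /* full-period budget never throttles */
                    return false;
            return rt_time > runtime;
    }

    int main(void)
    {
            /* 0.95 ms budget in a 1 ms period: throttles only once more
             * than 950000 ns has been consumed. */
            printf("%d %d\n",
                   runtime_exceeded(900000, 950000, 1000000),   /* 0 */
                   runtime_exceeded(960000, 950000, 1000000));  /* 1 */
            return 0;
    }
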
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -52,6 +512,8 @@ static void update_rt_migration(struct rq *rq)
 static void update_curr_rt(struct rq *rq)
 {
        struct task_struct *curr = rq->curr;
+       struct sched_rt_entity *rt_se = &curr->rt;
+       struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;
 
        if (!task_has_rt_policy(curr))
@@ -64,92 +526,243 @@ static void update_curr_rt(struct rq *rq)
        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
+       account_group_exec_runtime(curr, delta_exec);
+
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
+
+       if (!rt_bandwidth_enabled())
+               return;
+
+       for_each_sched_rt_entity(rt_se) {
+               rt_rq = rt_rq_of_se(rt_se);
+
+               if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+                       spin_lock(&rt_rq->rt_runtime_lock);
+                       rt_rq->rt_time += delta_exec;
+                       if (sched_rt_runtime_exceeded(rt_rq))
+                               resched_task(curr);
+                       spin_unlock(&rt_rq->rt_runtime_lock);
+               }
+       }
 }
 
-static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-       WARN_ON(!rt_task(p));
-       rq->rt.rt_nr_running++;
+       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+       rt_rq->rt_nr_running++;
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+       if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
 #ifdef CONFIG_SMP
-       if (p->prio < rq->rt.highest_prio)
-               rq->rt.highest_prio = p->prio;
-       if (p->nr_cpus_allowed > 1)
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+#endif
+
+               rt_rq->highest_prio = rt_se_prio(rt_se);
+#ifdef CONFIG_SMP
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_se_prio(rt_se));
+#endif
+       }
+#endif
+#ifdef CONFIG_SMP
+       if (rt_se->nr_cpus_allowed > 1) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
                rq->rt.rt_nr_migratory++;
+       }
 
-       update_rt_migration(rq);
-#endif /* CONFIG_SMP */
+       update_rt_migration(rq_of_rt_rq(rt_rq));
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+#else
+       start_rt_bandwidth(&def_rt_bandwidth);
+#endif
 }
 
-static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-       WARN_ON(!rt_task(p));
-       WARN_ON(!rq->rt.rt_nr_running);
-       rq->rt.rt_nr_running--;
 #ifdef CONFIG_SMP
-       if (rq->rt.rt_nr_running) {
+       int highest_prio = rt_rq->highest_prio;
+#endif
+
+       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+       WARN_ON(!rt_rq->rt_nr_running);
+       rt_rq->rt_nr_running--;
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+       if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;
 
-               WARN_ON(p->prio < rq->rt.highest_prio);
-               if (p->prio == rq->rt.highest_prio) {
+               WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
+               if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
-                       array = &rq->rt.active;
-                       rq->rt.highest_prio =
+                       array = &rt_rq->active;
+                       rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->highest prio alone */
        } else
-               rq->rt.highest_prio = MAX_RT_PRIO;
-       if (p->nr_cpus_allowed > 1)
+               rt_rq->highest_prio = MAX_RT_PRIO;
+#endif
+#ifdef CONFIG_SMP
+       if (rt_se->nr_cpus_allowed > 1) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;
+       }
+
+       if (rt_rq->highest_prio != highest_prio) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       update_rt_migration(rq);
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_rq->highest_prio);
+       }
+
+       update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted--;
+
+       WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
-       struct rt_prio_array *array = &rq->rt.active;
+       struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+       struct rt_prio_array *array = &rt_rq->active;
+       struct rt_rq *group_rq = group_rt_rq(rt_se);
+       struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-       list_add_tail(&p->run_list, array->queue + p->prio);
-       __set_bit(p->prio, array->bitmap);
-       inc_cpu_load(rq, p->se.load.weight);
+       /*
+        * Don't enqueue the group if it's throttled, or when empty.
+        * The latter is a consequence of the former when a child group
+        * gets throttled and the current group doesn't have any other
+        * active members.
+        */
+       if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
+               return;
+
+       list_add_tail(&rt_se->run_list, queue);
+       __set_bit(rt_se_prio(rt_se), array->bitmap);
+
+       inc_rt_tasks(rt_se, rt_rq);
+}
+
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+       struct rt_prio_array *array = &rt_rq->active;
+
+       list_del_init(&rt_se->run_list);
+       if (list_empty(array->queue + rt_se_prio(rt_se)))
+               __clear_bit(rt_se_prio(rt_se), array->bitmap);
+
+       dec_rt_tasks(rt_se, rt_rq);
+}
+
+/*
+ * Because the prio of an upper entry depends on the lower
+ * entries, we must remove entries top-down.
+ */
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
+{
+       struct sched_rt_entity *back = NULL;
+
+       for_each_sched_rt_entity(rt_se) {
+               rt_se->back = back;
+               back = rt_se;
+       }
+
+       for (rt_se = back; rt_se; rt_se = rt_se->back) {
+               if (on_rt_rq(rt_se))
+                       __dequeue_rt_entity(rt_se);
+       }
+}
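
The two-pass walk above is the usual trick for traversing a
parent-linked chain in the opposite direction: record 'back' pointers
on the way up, then follow them down. A self-contained sketch (the
struct is an illustrative stand-in for sched_rt_entity's parent/back
links):

    #include <stdio.h>

    struct ent {
            const char *name;
            struct ent *parent;     /* bottom -> top */
            struct ent *back;       /* filled in, top -> bottom */
    };

    static void dequeue_stack(struct ent *e)
    {
            struct ent *back = NULL;

            /* Pass 1: record reverse links while walking up. */
            for (; e; e = e->parent) {
                    e->back = back;
                    back = e;
            }

            /* Pass 2: 'back' is now the topmost entity; walk back down. */
            for (e = back; e; e = e->back)
                    printf("dequeue %s\n", e->name);
    }

    int main(void)
    {
            struct ent root  = { "root",  NULL,   NULL };
            struct ent group = { "group", &root,  NULL };
            struct ent task  = { "task",  &group, NULL };

            dequeue_stack(&task);   /* prints: root, group, task */
            return 0;
    }
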
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+       dequeue_rt_stack(rt_se);
+       for_each_sched_rt_entity(rt_se)
+               __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+       dequeue_rt_stack(rt_se);
+
+       for_each_sched_rt_entity(rt_se) {
+               struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
-       inc_rt_tasks(p, rq);
+               if (rt_rq && rt_rq->rt_nr_running)
+                       __enqueue_rt_entity(rt_se);
+       }
 }
 
 /*
  * Adding/removing a task to/from a priority array:
  */
+static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+{
+       struct sched_rt_entity *rt_se = &p->rt;
+
+       if (wakeup)
+               rt_se->timeout = 0;
+
+       enqueue_rt_entity(rt_se);
+
+       inc_cpu_load(rq, p->se.load.weight);
+}
+
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
-       struct rt_prio_array *array = &rq->rt.active;
+       struct sched_rt_entity *rt_se = &p->rt;
 
        update_curr_rt(rq);
+       dequeue_rt_entity(rt_se);
 
-       list_del(&p->run_list);
-       if (list_empty(array->queue + p->prio))
-               __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);
-
-       dec_rt_tasks(p, rq);
 }
 
 /*
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
+{
+       if (on_rt_rq(rt_se)) {
+               struct rt_prio_array *array = &rt_rq->active;
+               struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+               if (head)
+                       list_move(&rt_se->run_list, queue);
+               else
+                       list_move_tail(&rt_se->run_list, queue);
+       }
+}
+
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
-       struct rt_prio_array *array = &rq->rt.active;
+       struct sched_rt_entity *rt_se = &p->rt;
+       struct rt_rq *rt_rq;
 
-       list_move_tail(&p->run_list, array->queue + p->prio);
+       for_each_sched_rt_entity(rt_se) {
+               rt_rq = rt_rq_of_se(rt_se);
+               requeue_rt_entity(rt_rq, rt_se, head);
+       }
 }
 
-static void
-yield_task_rt(struct rq *rq)
+static void yield_task_rt(struct rq *rq)
 {
-       requeue_task_rt(rq, rq->curr);
+       requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -177,7 +790,7 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
         * cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
-           (p->nr_cpus_allowed > 1)) {
+           (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);
 
                return (cpu == -1) ? task_cpu(p) : cpu;
@@ -189,36 +802,107 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
         */
        return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+       cpumask_var_t mask;
+
+       if (rq->curr->rt.nr_cpus_allowed == 1)
+               return;
+
+       if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
+               return;
+
+       if (p->rt.nr_cpus_allowed != 1
+           && cpupri_find(&rq->rd->cpupri, p, mask))
+               goto free;
+
+       if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
+               goto free;
+
+       /*
+        * There appear to be other CPUs that can accept
+        * current and none to run 'p', so let's reschedule
+        * to try and push current away:
+        */
+       requeue_task_rt(rq, p, 1);
+       resched_task(rq->curr);
+free:
+       free_cpumask_var(mask);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
 {
-       if (p->prio < rq->curr->prio)
+       if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
+               return;
+       }
+
+#ifdef CONFIG_SMP
+       /*
+        * If:
+        *
+        * - the newly woken task is of equal priority to the current task
+        * - the newly woken task is non-migratable while current is migratable
+        * - current will be preempted on the next reschedule
+        *
+        * we should check to see if current can readily move to a different
+        * cpu.  If so, we will reschedule to allow the push logic to try
+        * to move current somewhere else, making room for our non-migratable
+        * task.
+        */
+       if (p->prio == rq->curr->prio && !need_resched())
+               check_preempt_equal_prio(rq, p);
+#endif
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
+                                                  struct rt_rq *rt_rq)
 {
-       struct rt_prio_array *array = &rq->rt.active;
-       struct task_struct *next;
+       struct rt_prio_array *array = &rt_rq->active;
+       struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;
 
        idx = sched_find_first_bit(array->bitmap);
-       if (idx >= MAX_RT_PRIO)
-               return NULL;
+       BUG_ON(idx >= MAX_RT_PRIO);
 
        queue = array->queue + idx;
-       next = list_entry(queue->next, struct task_struct, run_list);
-
-       next->se.exec_start = rq->clock;
+       next = list_entry(queue->next, struct sched_rt_entity, run_list);
 
        return next;
 }
 
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+       struct sched_rt_entity *rt_se;
+       struct task_struct *p;
+       struct rt_rq *rt_rq;
+
+       rt_rq = &rq->rt;
+
+       if (unlikely(!rt_rq->rt_nr_running))
+               return NULL;
+
+       if (rt_rq_throttled(rt_rq))
+               return NULL;
+
+       do {
+               rt_se = pick_next_rt_entity(rq, rt_rq);
+               BUG_ON(!rt_se);
+               rt_rq = group_rt_rq(rt_se);
+       } while (rt_rq);
+
+       p = rt_task_of(rt_se);
+       p->se.exec_start = rq->clock;
+       return p;
+}
+
 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
@@ -226,144 +910,56 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 #ifdef CONFIG_SMP
+
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
-           (p->nr_cpus_allowed > 1))
+           (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+           (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
 }
 
 /* Return the second highest RT task, NULL otherwise */
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
-                                                    int cpu)
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 {
-       struct rt_prio_array *array = &rq->rt.active;
-       struct task_struct *next;
-       struct list_head *queue;
+       struct task_struct *next = NULL;
+       struct sched_rt_entity *rt_se;
+       struct rt_prio_array *array;
+       struct rt_rq *rt_rq;
        int idx;
 
-       assert_spin_locked(&rq->lock);
-
-       if (likely(rq->rt.rt_nr_running < 2))
-               return NULL;
-
-       idx = sched_find_first_bit(array->bitmap);
-       if (unlikely(idx >= MAX_RT_PRIO)) {
-               WARN_ON(1); /* rt_nr_running is bad */
-               return NULL;
-       }
-
-       queue = array->queue + idx;
-       BUG_ON(list_empty(queue));
-
-       next = list_entry(queue->next, struct task_struct, run_list);
-       if (unlikely(pick_rt_task(rq, next, cpu)))
-               goto out;
-
-       if (queue->next->next != queue) {
-               /* same prio task */
-               next = list_entry(queue->next->next, struct task_struct, run_list);
-               if (pick_rt_task(rq, next, cpu))
-                       goto out;
-       }
-
- retry:
-       /* slower, but more flexible */
-       idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-       if (unlikely(idx >= MAX_RT_PRIO))
-               return NULL;
-
-       queue = array->queue + idx;
-       BUG_ON(list_empty(queue));
-
-       list_for_each_entry(next, queue, run_list) {
-               if (pick_rt_task(rq, next, cpu))
-                       goto out;
-       }
-
-       goto retry;
-
- out:
-       return next;
-}
-
-static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-
-static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
-{
-       int       lowest_prio = -1;
-       int       lowest_cpu  = -1;
-       int       count       = 0;
-       int       cpu;
-
-       cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);
-
-       /*
-        * Scan each rq for the lowest prio.
-        */
-       for_each_cpu_mask(cpu, *lowest_mask) {
-               struct rq *rq = cpu_rq(cpu);
-
-               /* We look for lowest RT prio or non-rt CPU */
-               if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-                       /*
-                        * if we already found a low RT queue
-                        * and now we found this non-rt queue
-                        * clear the mask and set our bit.
-                        * Otherwise just return the queue as is
-                        * and the count==1 will cause the algorithm
-                        * to use the first bit found.
-                        */
-                       if (lowest_cpu != -1) {
-                               cpus_clear(*lowest_mask);
-                               cpu_set(rq->cpu, *lowest_mask);
+       for_each_leaf_rt_rq(rt_rq, rq) {
+               array = &rt_rq->active;
+               idx = sched_find_first_bit(array->bitmap);
+ next_idx:
+               if (idx >= MAX_RT_PRIO)
+                       continue;
+               if (next && next->prio < idx)
+                       continue;
+               list_for_each_entry(rt_se, array->queue + idx, run_list) {
+                       struct task_struct *p = rt_task_of(rt_se);
+                       if (pick_rt_task(rq, p, cpu)) {
+                               next = p;
+                               break;
                        }
-                       return 1;
                }
-
-               /* no locking for now */
-               if ((rq->rt.highest_prio > task->prio)
-                   && (rq->rt.highest_prio >= lowest_prio)) {
-                       if (rq->rt.highest_prio > lowest_prio) {
-                               /* new low - clear old data */
-                               lowest_prio = rq->rt.highest_prio;
-                               lowest_cpu = cpu;
-                               count = 0;
-                       }
-                       count++;
-               } else
-                       cpu_clear(cpu, *lowest_mask);
-       }
-
-       /*
-        * Clear out all the set bits that represent
-        * runqueues that were of higher prio than
-        * the lowest_prio.
-        */
-       if (lowest_cpu > 0) {
-               /*
-                * Perhaps we could add another cpumask op to
-                * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
-                * Then that could be optimized to use memset and such.
-                */
-               for_each_cpu_mask(cpu, *lowest_mask) {
-                       if (cpu >= lowest_cpu)
-                               break;
-                       cpu_clear(cpu, *lowest_mask);
+               if (!next) {
+                       idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
+                       goto next_idx;
                }
        }
 
-       return count;
+       return next;
 }
 
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
        int first;
@@ -372,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;
 
-       first = first_cpu(*mask);
-       if (first != NR_CPUS)
+       first = cpumask_first(mask);
+       if (first < nr_cpu_ids)
                return first;
 
        return -1;
@@ -382,20 +978,22 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
        struct sched_domain *sd;
-       cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+       struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
-       int count    = find_lowest_cpus(task, lowest_mask);
 
-       if (!count)
+       if (task->rt.nr_cpus_allowed == 1)
+               return -1; /* No other targets possible */
+
+       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */
 
        /*
-        * There is no sense in performing an optimal search if only one
-        * target is found.
+        * Only consider CPUs that are usable for migration.
+        * I guess we might want to change cpupri_find() to ignore those
+        * in the first place.
         */
-       if (count == 1)
-               return first_cpu(*lowest_mask);
+       cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
        /*
         * At this point we have built a mask of cpus representing the
@@ -405,7 +1003,7 @@ static int find_lowest_rq(struct task_struct *task)
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
-       if (cpu_isset(cpu, *lowest_mask))
+       if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;
 
        /*
@@ -420,7 +1018,8 @@ static int find_lowest_rq(struct task_struct *task)
                        cpumask_t domain_mask;
                        int       best_cpu;
 
-                       cpus_and(domain_mask, sd->span, *lowest_mask);
+                       cpumask_and(&domain_mask, sched_domain_span(sd),
+                                   lowest_mask);
 
                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
@@ -438,12 +1037,11 @@ static int find_lowest_rq(struct task_struct *task)
 }
 
 /* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-                                     struct rq *rq)
+static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 {
        struct rq *lowest_rq = NULL;
-       int cpu;
        int tries;
+       int cpu;
 
        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);
@@ -462,9 +1060,11 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
-                                    !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
+                                    !cpumask_test_cpu(lowest_rq->cpu,
+                                                      &task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {
+
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
@@ -476,7 +1076,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
                        break;
 
                /* try again */
-               spin_unlock(&lowest_rq->lock);
+               double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }
 
@@ -495,8 +1095,6 @@ static int push_rt_task(struct rq *rq)
        int ret = 0;
        int paranoid = RT_MAX_TRIES;
 
-       assert_spin_locked(&rq->lock);
-
        if (!rq->rt.overloaded)
                return 0;
 
@@ -541,15 +1139,13 @@ static int push_rt_task(struct rq *rq)
                goto out;
        }
 
-       assert_spin_locked(&lowest_rq->lock);
-
        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
 
        resched_task(lowest_rq->curr);
 
-       spin_unlock(&lowest_rq->lock);
+       double_unlock_balance(rq, lowest_rq);
 
        ret = 1;
 out:
@@ -577,64 +1173,20 @@ static void push_rt_tasks(struct rq *rq)
 
 static int pull_rt_task(struct rq *this_rq)
 {
-       struct task_struct *next;
-       struct task_struct *p;
+       int this_cpu = this_rq->cpu, ret = 0, cpu;
+       struct task_struct *p, *next;
        struct rq *src_rq;
-       cpumask_t *rto_cpumask;
-       int this_cpu = this_rq->cpu;
-       int cpu;
-       int ret = 0;
 
-       assert_spin_locked(&this_rq->lock);
-
-       /*
-        * If cpusets are used, and we have overlapping
-        * run queue cpusets, then this algorithm may not catch all.
-        * This is just the price you pay on trying to keep
-        * dirtying caches down on large SMP machines.
-        */
-       if (likely(!rt_overloaded()))
+       if (likely(!rt_overloaded(this_rq)))
                return 0;
 
        next = pick_next_task_rt(this_rq);
 
-       rto_cpumask = rt_overload();
-
-       for_each_cpu_mask(cpu, *rto_cpumask) {
+       for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
 
                src_rq = cpu_rq(cpu);
-               if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
-                       /*
-                        * It is possible that overlapping cpusets
-                        * will miss clearing a non overloaded runqueue.
-                        * Clear it now.
-                        */
-                       if (double_lock_balance(this_rq, src_rq)) {
-                               /* unlocked our runqueue lock */
-                               struct task_struct *old_next = next;
-                               next = pick_next_task_rt(this_rq);
-                               if (next != old_next)
-                                       ret = 1;
-                       }
-                       if (likely(src_rq->rt.rt_nr_running <= 1))
-                               /*
-                                * Small chance that this_rq->curr changed
-                                * but it's really harmless here.
-                                */
-                               rt_clear_overload(this_rq);
-                       else
-                               /*
-                                * Heh, the src_rq is now overloaded, since
-                                * we already have the src_rq lock, go straight
-                                * to pulling tasks from it.
-                                */
-                               goto try_pulling;
-                       spin_unlock(&src_rq->lock);
-                       continue;
-               }
-
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
@@ -644,6 +1196,7 @@ static int pull_rt_task(struct rq *this_rq)
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;
+
                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
@@ -652,12 +1205,9 @@ static int pull_rt_task(struct rq *this_rq)
                /*
                 * Are there still pullable RT tasks?
                 */
-               if (src_rq->rt.rt_nr_running <= 1) {
-                       spin_unlock(&src_rq->lock);
-                       continue;
-               }
+               if (src_rq->rt.rt_nr_running <= 1)
+                       goto skip;
 
- try_pulling:
                p = pick_next_highest_task_rt(src_rq, this_cpu);
 
                /*
@@ -680,7 +1230,7 @@ static int pull_rt_task(struct rq *this_rq)
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
-                               goto bail;
+                               goto skip;
 
                        ret = 1;
 
@@ -692,9 +1242,7 @@ static int pull_rt_task(struct rq *this_rq)
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
-                        */
-
-                       /*
+                        *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
@@ -702,23 +1250,21 @@ static int pull_rt_task(struct rq *this_rq)
                        next = p;
 
                }
bail:
-               spin_unlock(&src_rq->lock);
skip:
+               double_unlock_balance(this_rq, src_rq);
        }
 
        return ret;
 }
 
-static void schedule_balance_rt(struct rq *rq,
-                               struct task_struct *prev)
+static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
        /* Try to pull RT tasks here if we lower this rq's prio */
-       if (unlikely(rt_task(prev)) &&
-           rq->rt.highest_prio > prev->prio)
+       if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
 }
 
-static void schedule_tail_balance_rt(struct rq *rq)
+static void post_schedule_rt(struct rq *rq)
 {
        /*
         * If we have more than one rt_task queued, then
@@ -734,12 +1280,14 @@ static void schedule_tail_balance_rt(struct rq *rq)
        }
 }
 
-
-static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
+/*
+ * If we are not running and we are not going to reschedule soon, we should
+ * try to push tasks away now
+ */
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
-       if (unlikely(rt_task(p)) &&
-           !task_running(rq, p) &&
-           (p->prio >= rq->rt.highest_prio) &&
+       if (!task_running(rq, p) &&
+           !test_tsk_need_resched(rq->curr) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
 }
@@ -761,9 +1309,11 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
        /* don't touch RT tasks */
        return 0;
 }
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+
+static void set_cpus_allowed_rt(struct task_struct *p,
+                               const struct cpumask *new_mask)
 {
-       int weight = cpus_weight(*new_mask);
+       int weight = cpumask_weight(new_mask);
 
        BUG_ON(!rt_task(p));
 
@@ -771,12 +1321,12 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
-       if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
+       if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);
 
-               if ((p->nr_cpus_allowed <= 1) && (weight > 1))
+               if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
-               else if((p->nr_cpus_allowed > 1) && (weight <= 1)) {
+               } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }
@@ -784,19 +1334,154 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
                update_rt_migration(rq);
        }
 
-       p->cpus_allowed    = *new_mask;
-       p->nr_cpus_allowed = weight;
+       cpumask_copy(&p->cpus_allowed, new_mask);
+       p->rt.nr_cpus_allowed = weight;
+}
+
+/* Assumes rq->lock is held */
+static void rq_online_rt(struct rq *rq)
+{
+       if (rq->rt.overloaded)
+               rt_set_overload(rq);
+
+       __enable_runtime(rq);
+
+       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+}
+
+/* Assumes rq->lock is held */
+static void rq_offline_rt(struct rq *rq)
+{
+       if (rq->rt.overloaded)
+               rt_clear_overload(rq);
+
+       __disable_runtime(rq);
+
+       cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 }
-#else /* CONFIG_SMP */
-# define schedule_tail_balance_rt(rq)  do { } while (0)
-# define schedule_balance_rt(rq, prev) do { } while (0)
-# define wakeup_balance_rt(rq, p)      do { } while (0)
+
+/*
+ * When switching from the RT queue, we bring ourselves to a position
+ * where we might want to pull RT tasks from other runqueues.
+ */
+static void switched_from_rt(struct rq *rq, struct task_struct *p,
+                          int running)
+{
+       /*
+        * If there are other RT tasks then we will reschedule
+        * and the scheduling of the other RT tasks will handle
+        * the balancing. But if we are the last RT task
+        * we may need to handle the pulling of RT tasks
+        * now.
+        */
+       if (!rq->rt.rt_nr_running)
+               pull_rt_task(rq);
+}
+
+static inline void init_sched_rt_class(void)
+{
+       unsigned int i;
+
+       for_each_possible_cpu(i)
+               alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+                                       GFP_KERNEL, cpu_to_node(i));
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * When switching a task to RT, we may overload the runqueue
+ * with RT tasks. In this case we try to push them off to
+ * other runqueues.
+ */
+static void switched_to_rt(struct rq *rq, struct task_struct *p,
+                          int running)
+{
+       int check_resched = 1;
+
+       /*
+        * If we are already running, then there's nothing
+        * that needs to be done. But if we are not running
+        * we may need to preempt the current running task.
+        * If that current running task is also an RT task
+        * then see if we can move to another run queue.
+        */
+       if (!running) {
+#ifdef CONFIG_SMP
+               if (rq->rt.overloaded && push_rt_task(rq) &&
+                   /* Don't resched if we changed runqueues */
+                   rq != task_rq(p))
+                       check_resched = 0;
 #endif /* CONFIG_SMP */
+               if (check_resched && p->prio < rq->curr->prio)
+                       resched_task(rq->curr);
+       }
+}
 
-static void task_tick_rt(struct rq *rq, struct task_struct *p)
+/*
+ * Priority of the task has changed. This may cause
+ * us to initiate a push or pull.
+ */
+static void prio_changed_rt(struct rq *rq, struct task_struct *p,
+                           int oldprio, int running)
+{
+       if (running) {
+#ifdef CONFIG_SMP
+               /*
+                * If our priority decreases while running, we
+                * may need to pull tasks to this runqueue.
+                */
+               if (oldprio < p->prio)
+                       pull_rt_task(rq);
+               /*
+                * If there's a higher priority task waiting to run
+                * then reschedule. Note, the above pull_rt_task
+                * can release the rq lock and p could migrate.
+                * Only reschedule if p is still on the same runqueue.
+                */
+               if (p->prio > rq->rt.highest_prio && rq->curr == p)
+                       resched_task(p);
+#else
+               /* For UP simply resched on drop of prio */
+               if (oldprio < p->prio)
+                       resched_task(p);
+#endif /* CONFIG_SMP */
+       } else {
+               /*
+                * This task is not running, but if it is
+                * greater than the current running task
+                * then reschedule.
+                */
+               if (p->prio < rq->curr->prio)
+                       resched_task(rq->curr);
+       }
+}
+
+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+       unsigned long soft, hard;
+
+       if (!p->signal)
+               return;
+
+       soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+       hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+       if (soft != RLIM_INFINITY) {
+               unsigned long next;
+
+               p->rt.timeout++;
+               next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+               if (p->rt.timeout > next)
+                       p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
+       }
+}
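
The DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ) step converts the
RLIMIT_RTTIME budget, which userspace specifies in microseconds, into
scheduler ticks so it can be compared against p->rt.timeout. Worked
numbers, assuming HZ=250 purely for illustration:

    #include <stdio.h>

    #define HZ 250
    #define USEC_PER_SEC 1000000UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Illustrative limits: soft 950 ms, hard 1 s, in us. */
            unsigned long soft = 950000, hard = 1000000;
            unsigned long limit = soft < hard ? soft : hard;

            /* Each tick is 4000 us at HZ=250, so 950000 us rounds up to
             * 238 ticks; once p->rt.timeout exceeds that, the posix
             * cpu-timer path takes over (sched_exp is set above). */
            printf("limit: %lu ticks\n",
                   DIV_ROUND_UP(limit, USEC_PER_SEC / HZ));
            return 0;
    }
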
+
+static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
        update_curr_rt(rq);
 
+       watchdog(rq, p);
+
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
@@ -804,17 +1489,17 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
        if (p->policy != SCHED_RR)
                return;
 
-       if (--p->time_slice)
+       if (--p->rt.time_slice)
                return;
 
-       p->time_slice = DEF_TIMESLICE;
+       p->rt.time_slice = DEF_TIMESLICE;
 
        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
-       if (p->run_list.prev != p->run_list.next) {
-               requeue_task_rt(rq, p);
+       if (p->rt.run_list.prev != p->rt.run_list.next) {
+               requeue_task_rt(rq, p, 0);
                set_tsk_need_resched(p);
        }
 }
@@ -826,14 +1511,11 @@ static void set_curr_task_rt(struct rq *rq)
        p->se.exec_start = rq->clock;
 }
 
-const struct sched_class rt_sched_class = {
+static const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
-#ifdef CONFIG_SMP
-       .select_task_rq         = select_task_rq_rt,
-#endif /* CONFIG_SMP */
 
        .check_preempt_curr     = check_preempt_curr_rt,
 
@@ -841,11 +1523,37 @@ const struct sched_class rt_sched_class = {
        .put_prev_task          = put_prev_task_rt,
 
 #ifdef CONFIG_SMP
+       .select_task_rq         = select_task_rq_rt,
+
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
+       .rq_online              = rq_online_rt,
+       .rq_offline             = rq_offline_rt,
+       .pre_schedule           = pre_schedule_rt,
+       .post_schedule          = post_schedule_rt,
+       .task_wake_up           = task_wake_up_rt,
+       .switched_from          = switched_from_rt,
 #endif
 
        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
+
+       .prio_changed           = prio_changed_rt,
+       .switched_to            = switched_to_rt,
 };
+
+#ifdef CONFIG_SCHED_DEBUG
+extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+
+static void print_rt_stats(struct seq_file *m, int cpu)
+{
+       struct rt_rq *rt_rq;
+
+       rcu_read_lock();
+       for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+               print_rt_rq(m, cpu, rt_rq);
+       rcu_read_unlock();
+}
+#endif /* CONFIG_SCHED_DEBUG */
+