diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f..d9ba9d5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -102,12 +102,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+       struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
-       if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
-               struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-
-               enqueue_rt_entity(rt_se);
+       if (rt_rq->rt_nr_running) {
+               if (rt_se && !on_rt_rq(rt_se))
+                       enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
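
A note on the reordering above: previously resched_task() was only reachable when the entity had just been enqueued, so a replenished group whose entity was already on the runqueue never preempted a lower-priority current task. A minimal userspace sketch of the new check order (illustrative only; the names are local to the example, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct grp {
		int nr_running;		/* tasks in the group */
		bool queued;		/* entity already on the parent rq? */
		int highest_prio;	/* lower value = higher priority */
	};

	static void rq_enqueue(struct grp *g, int curr_prio, bool *need_resched)
	{
		*need_resched = false;
		if (g->nr_running) {
			if (!g->queued)
				g->queued = true;	/* enqueue the entity */
			if (g->highest_prio < curr_prio)
				*need_resched = true;	/* preempt current */
		}
	}

	int main(void)
	{
		/* Entity already queued but holding a higher-priority task:
		 * the reordered code still requests a resched. */
		struct grp g = { .nr_running = 1, .queued = true, .highest_prio = 10 };
		bool resched;
		rq_enqueue(&g, 20, &resched);
		printf("need_resched = %d\n", resched);
		return 0;
	}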
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+       if (rt_rq->rt_nr_running)
+               resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
@@ -229,6 +231,9 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
+/*
+ * We ran out of runtime; see if we can borrow some from our neighbours.
+ */
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -248,9 +253,18 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                        continue;
 
                spin_lock(&iter->rt_runtime_lock);
+               /*
+                * Either all rqs have inf runtime and there's nothing to steal
+                * or __disable_runtime() below sets a specific rq to inf to
+                * indicate it's been disabled and disallow stealing.
+                */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;
 
+               /*
+                * From runqueues with spare time, take 1/n part of their
+                * spare time, but no more than our period.
+                */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
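
The 1/n split is worth working through: a runqueue that has exhausted its budget scans its root-domain peers, and from each peer with spare time (rt_runtime - rt_time > 0) it takes only a 1/weight share, capped so its own rt_runtime never exceeds the period. A standalone sketch of that arithmetic (plain C with values in nanoseconds; not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t borrow_from_peer(uint64_t period, uint64_t my_runtime,
					 uint64_t peer_runtime, uint64_t peer_time,
					 int weight)
	{
		int64_t diff = (int64_t)(peer_runtime - peer_time); /* peer's spare */

		if (diff > 0) {
			diff /= weight;			/* take only a 1/n share */
			if (my_runtime + diff > period)	/* never exceed our period */
				diff = period - my_runtime;
			my_runtime += diff;
		}
		return my_runtime;
	}

	int main(void)
	{
		/* 4-CPU span: a peer with 350us spare lends a 1/4 share (87.5us). */
		printf("%llu\n", (unsigned long long)
		       borrow_from_peer(1000000, 900000, 950000, 600000, 4));
		return 0;
	}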
@@ -272,6 +286,9 @@ next:
        return more;
 }
 
+/*
+ * Ensure this RQ takes back all the runtime it lent to its neighbours.
+ */
 static void __disable_runtime(struct rq *rq)
 {
        struct root_domain *rd = rq->rd;
@@ -287,18 +304,34 @@ static void __disable_runtime(struct rq *rq)
 
                spin_lock(&rt_b->rt_runtime_lock);
                spin_lock(&rt_rq->rt_runtime_lock);
+               /*
+                * Either we're all inf and nobody needs to borrow, or we're
+                * already disabled and thus have nothing to do, or we have
+                * exactly the right amount of runtime to take out.
+                */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                spin_unlock(&rt_rq->rt_runtime_lock);
 
+               /*
+                * Calculate the difference between what we started out with
+                * and what we currently have; that's the amount of runtime
+                * we lent out and now have to reclaim.
+                */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;
 
+               /*
+                * Greedy reclaim, take back as much as we can.
+                */
                for_each_cpu_mask(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;
 
-                       if (iter == rt_rq)
+                       /*
+                        * Can't reclaim from ourselves or disabled runqueues.
+                        */
+                       if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;
 
                        spin_lock(&iter->rt_runtime_lock);
@@ -317,8 +350,16 @@ static void __disable_runtime(struct rq *rq)
                }
 
                spin_lock(&rt_rq->rt_runtime_lock);
+               /*
+                * We cannot be left wanting - that would mean some runtime
+                * leaked out of the system.
+                */
                BUG_ON(want);
 balanced:
+               /*
+                * Disable all the borrow logic by pretending we have inf
+                * runtime - in which case borrowing doesn't make sense.
+                */
                rt_rq->rt_runtime = RUNTIME_INF;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);
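
The BUG_ON(want) encodes the invariant that every nanosecond this runqueue lent out is sitting in some neighbour's rt_runtime, so greedily pulling back from each peer must drain want completely; anything left over would mean runtime leaked out of the system. A simplified illustration of the drain (locking and the per-peer bookkeeping omitted; hypothetical values, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t s64;

	/* peer_spare[i]: what peer i currently holds above its base
	 * allocation, i.e. what it can give back. */
	static s64 reclaim(s64 want, s64 *peer_spare, int n)
	{
		for (int i = 0; i < n && want > 0; i++) {
			s64 diff = peer_spare[i] < want ? peer_spare[i] : want;
			peer_spare[i] -= diff;
			want -= diff;
		}
		return want;	/* must reach 0, or runtime leaked */
	}

	int main(void)
	{
		s64 spare[3] = { 20000, 50000, 40000 };
		/* We lent out 60000 in total; it must all be recoverable. */
		printf("left wanting: %lld\n", (long long)reclaim(60000, spare, 3));
		return 0;
	}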
@@ -341,6 +382,9 @@ static void __enable_runtime(struct rq *rq)
        if (unlikely(!scheduler_running))
                return;
 
+       /*
+        * Reset each runqueue's bandwidth settings
+        */
        for_each_leaf_rt_rq(rt_rq, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
@@ -348,6 +392,7 @@ static void __enable_runtime(struct rq *rq)
                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
+               rt_rq->rt_throttled = 0;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);
        }
@@ -386,7 +431,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
        int i, idle = 1;
        cpumask_t span;
 
-       if (rt_b->rt_runtime == RUNTIME_INF)
+       if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return 1;
 
        span = sched_rt_period_mask();
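
rt_bandwidth_enabled() is the new global gate, also used in update_curr_rt() below. The helper itself is defined in sched.c, not in this file; its likely shape in this series is just a sysctl test, so writing -1 to sched_rt_runtime_us switches RT throttling off system-wide:

	static inline int rt_bandwidth_enabled(void)
	{
		/* sysctl_sched_rt_runtime < 0 (i.e. -1) disables RT bandwidth */
		return sysctl_sched_rt_runtime >= 0;
	}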
@@ -438,9 +483,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
        u64 runtime = sched_rt_runtime(rt_rq);
 
-       if (runtime == RUNTIME_INF)
-               return 0;
-
        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);
 
@@ -484,16 +526,23 @@ static void update_curr_rt(struct rq *rq)
        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
+       account_group_exec_runtime(curr, delta_exec);
+
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
 
+       if (!rt_bandwidth_enabled())
+               return;
+
        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
 
                spin_lock(&rt_rq->rt_runtime_lock);
-               rt_rq->rt_time += delta_exec;
-               if (sched_rt_runtime_exceeded(rt_rq))
-                       resched_task(curr);
+               if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+                       rt_rq->rt_time += delta_exec;
+                       if (sched_rt_runtime_exceeded(rt_rq))
+                               resched_task(curr);
+               }
                spin_unlock(&rt_rq->rt_runtime_lock);
        }
 }
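
Note how the RUNTIME_INF early-out removed from sched_rt_runtime_exceeded() (hunk at old line 438) reappears here, but now under rt_rq->rt_runtime_lock: the test can no longer race with __disable_runtime() flipping rt_runtime to RUNTIME_INF on another CPU. A userspace analogue of the check-under-lock pattern (a pthread mutex standing in for the spinlock; names are local to the example):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RUNTIME_INF ((uint64_t)~0ULL)

	struct fake_rt_rq {
		pthread_mutex_t lock;
		uint64_t rt_runtime;
		uint64_t rt_time;
	};

	static void charge(struct fake_rt_rq *rq, uint64_t delta)
	{
		pthread_mutex_lock(&rq->lock);
		if (rq->rt_runtime != RUNTIME_INF)	/* checked under the lock */
			rq->rt_time += delta;
		pthread_mutex_unlock(&rq->lock);
	}

	static void disable(struct fake_rt_rq *rq)
	{
		pthread_mutex_lock(&rq->lock);
		rq->rt_runtime = RUNTIME_INF;		/* accounting off */
		pthread_mutex_unlock(&rq->lock);
	}

	int main(void)
	{
		struct fake_rt_rq rq = { PTHREAD_MUTEX_INITIALIZER, 950000, 0 };
		charge(&rq, 1000);
		disable(&rq);
		charge(&rq, 1000);	/* ignored: rq is disabled */
		printf("rt_time = %llu\n", (unsigned long long)rq.rt_time);
		return 0;
	}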
@@ -782,7 +831,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
 {
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
@@ -861,6 +910,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
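
double_unlock_balance() pairs with double_lock_balance(), which may drop and re-take this_rq->lock in order to acquire both runqueue locks in a consistent order. The raw spin_unlock(&...->lock) calls replaced below therefore go through the helper so the lockdep annotation on this_rq is restored as well. Its likely shape (defined in sched.c; shown here for context only):

	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
		__releases(busiest->lock)
	{
		spin_unlock(&busiest->lock);
		lock_set_subclass(&this_rq->lock.dep_map, SINGLE_DEPTH_NESTING,
				  _RET_IP_);
	}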
@@ -1022,7 +1073,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                /* try again */
-               spin_unlock(&lowest_rq->lock);
+               double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }
 
@@ -1091,7 +1142,7 @@ static int push_rt_task(struct rq *rq)
 
        resched_task(lowest_rq->curr);
 
-       spin_unlock(&lowest_rq->lock);
+       double_unlock_balance(rq, lowest_rq);
 
        ret = 1;
 out:
@@ -1197,7 +1248,7 @@ static int pull_rt_task(struct rq *this_rq)
 
                }
  skip:
-               spin_unlock(&src_rq->lock);
+               double_unlock_balance(this_rq, src_rq);
        }
 
        return ret;
@@ -1409,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
-                       p->it_sched_expires = p->se.sum_exec_runtime;
+                       p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
        }
 }
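
The it_sched_expires field went away because the per-thread POSIX CPU timer caches were folded into a single task_cputime bundle; in that rework sched_exp is an alias for the bundle's sum_exec_runtime slot. Roughly (from the thread-group CPU-timer rework in include/linux/sched.h; shown for context, not part of this diff):

	struct task_cputime {
		cputime_t		utime;
		cputime_t		stime;
		unsigned long long	sum_exec_runtime;
	};

	/* Alternate field names when used to cache expiration times: */
	#define prof_exp	stime
	#define virt_exp	utime
	#define sched_exp	sum_exec_runtime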
 
@@ -1453,9 +1504,6 @@ static const struct sched_class rt_sched_class = {
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
-#ifdef CONFIG_SMP
-       .select_task_rq         = select_task_rq_rt,
-#endif /* CONFIG_SMP */
 
        .check_preempt_curr     = check_preempt_curr_rt,
 
@@ -1463,6 +1511,8 @@ static const struct sched_class rt_sched_class = {
        .put_prev_task          = put_prev_task_rt,
 
 #ifdef CONFIG_SMP
+       .select_task_rq         = select_task_rq_rt,
+
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,