sched_rt: RT group scheduling, per-rq RT runtime accounting and bandwidth balancing
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 35825b2..c2730a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,14 +55,19 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return !list_empty(&rt_se->run_list);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
        if (!rt_rq->tg)
                return RUNTIME_INF;
 
-       return rt_rq->tg->rt_runtime;
+       return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -127,14 +132,39 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
        return p->prio != p->normal_prio;
 }
 
+#ifdef CONFIG_SMP
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_online_map;
+}
+#endif
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+       return &rt_rq->tg->rt_bandwidth;
+}
+
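In the CONFIG_RT_GROUP_SCHED branch just above, sched_rt_period_rt_rq() recovers the owning task_group from a struct rt_bandwidth pointer with container_of(): the bandwidth object is embedded in the group, so subtracting its offset yields the group, whose rt_rq[cpu] array then gives the per-CPU runqueue. A minimal user-space sketch of that embedded-member lookup follows; the struct names and fields are simplified stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bandwidth { long long period_ns; };     /* stands in for rt_bandwidth */
struct group { struct bandwidth bw; int id; }; /* stands in for task_group   */

/* Given only the embedded bandwidth object, recover the owning group. */
static struct group *group_of_bw(struct bandwidth *bw)
{
	return container_of(bw, struct group, bw);
}

int main(void)
{
	struct group g = { .bw = { .period_ns = 1000000000LL }, .id = 7 };
	struct bandwidth *bw = &g.bw;          /* all a timer callback would see */

	printf("group id recovered from bandwidth pointer: %d\n",
	       group_of_bw(bw)->id);           /* prints 7 */
	return 0;
}
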
 #else
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-       if (sysctl_sched_rt_runtime == -1)
-               return RUNTIME_INF;
+       return rt_rq->rt_runtime;
+}
 
-       return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(def_rt_bandwidth.rt_period);
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -173,11 +203,107 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 {
        return rt_rq->rt_throttled;
 }
+
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_online_map;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return &cpu_rq(cpu)->rt;
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+       return &def_rt_bandwidth;
+}
+
+#endif
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+       int i, idle = 1;
+       cpumask_t span;
+
+       if (rt_b->rt_runtime == RUNTIME_INF)
+               return 1;
+
+       span = sched_rt_period_mask();
+       for_each_cpu_mask(i, span) {
+               int enqueue = 0;
+               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               spin_lock(&rq->lock);
+               if (rt_rq->rt_time) {
+                       u64 runtime;
+
+                       spin_lock(&rt_rq->rt_runtime_lock);
+                       runtime = rt_rq->rt_runtime;
+                       rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+                               rt_rq->rt_throttled = 0;
+                               enqueue = 1;
+                       }
+                       if (rt_rq->rt_time || rt_rq->rt_nr_running)
+                               idle = 0;
+                       spin_unlock(&rt_rq->rt_runtime_lock);
+               }
+
+               if (enqueue)
+                       sched_rt_rq_enqueue(rt_rq);
+               spin_unlock(&rq->lock);
+       }
+
+       return idle;
+}
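do_sched_rt_period_timer() runs once per bandwidth period (scaled by the timer overrun count) for every rt_rq in the period mask: it refunds up to overrun*rt_runtime of accumulated rt_time, clears the throttle once the remaining debt drops below one period's runtime, and reports idle so the caller can stop the timer when no queue has pending time or runnable tasks. A stand-alone sketch of the per-queue arithmetic follows; the struct is an illustrative model, not the kernel's rt_rq.

#include <stdio.h>

/* Minimal model of one rt_rq's accounting fields; the names mirror the
 * patch, but this is an illustration, not the kernel structure. */
struct model_rt_rq {
	unsigned long long rt_time;     /* ns consumed in the current period */
	unsigned long long rt_runtime;  /* ns allowed per period             */
	int rt_throttled;
};

/* One expiry of the period timer, 'overrun' periods after the last one. */
static void replenish(struct model_rt_rq *rq, unsigned long long overrun)
{
	unsigned long long credit = overrun * rq->rt_runtime;

	/* refund at most what was actually consumed */
	rq->rt_time -= credit < rq->rt_time ? credit : rq->rt_time;

	/* unthrottle once the remaining debt fits inside one period's budget */
	if (rq->rt_throttled && rq->rt_time < rq->rt_runtime)
		rq->rt_throttled = 0;   /* the kernel would re-enqueue the rt_rq here */
}

int main(void)
{
	/* 950ms budget per period; the queue overran and was throttled. */
	struct model_rt_rq rq = {
		.rt_time      = 1200000000ULL,
		.rt_runtime   =  950000000ULL,
		.rt_throttled = 1,
	};

	replenish(&rq, 1);      /* one period elapsed */
	printf("rt_time=%llu throttled=%d\n", rq.rt_time, rq.rt_throttled);
	return 0;
}
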
+
+#ifdef CONFIG_SMP
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+       struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+       int i, weight, more = 0;
+       u64 rt_period;
+
+       weight = cpus_weight(rd->span);
+
+       spin_lock(&rt_b->rt_runtime_lock);
+       rt_period = ktime_to_ns(rt_b->rt_period);
+       for_each_cpu_mask(i, rd->span) {
+               struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+               s64 diff;
+
+               if (iter == rt_rq)
+                       continue;
+
+               spin_lock(&iter->rt_runtime_lock);
+               diff = iter->rt_runtime - iter->rt_time;
+               if (diff > 0) {
+                       do_div(diff, weight);
+                       if (rt_rq->rt_runtime + diff > rt_period)
+                               diff = rt_period - rt_rq->rt_runtime;
+                       iter->rt_runtime -= diff;
+                       rt_rq->rt_runtime += diff;
+                       more = 1;
+                       if (rt_rq->rt_runtime == rt_period) {
+                               spin_unlock(&iter->rt_runtime_lock);
+                               break;
+                       }
+               }
+               spin_unlock(&iter->rt_runtime_lock);
+       }
+       spin_unlock(&rt_b->rt_runtime_lock);
+
+       return more;
+}
 #endif
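balance_runtime() lets a CPU that has exhausted its local rt_runtime borrow from the other CPUs in its root domain: each donor contributes at most its unused budget divided by the span weight, and the borrower is capped at the full rt_period so it can never claim more than one period's worth of runtime. A simplified stand-alone model of that arithmetic follows; the budget array and the fixed NCPUS weight are assumptions for illustration only.

#include <stdio.h>

#define NCPUS 4                         /* plays the role of cpus_weight(rd->span) */

/* Per-CPU budget model; field names follow the patch, but this is only a
 * stand-alone illustration of the borrowing arithmetic. */
struct budget {
	long long rt_runtime;           /* ns this CPU may spend per period */
	long long rt_time;              /* ns already spent this period     */
};

/* Borrow spare runtime for CPU 'self' from the others, mirroring the
 * diff/weight logic in balance_runtime(). */
static int borrow(struct budget b[], int self, long long rt_period)
{
	int i, more = 0;

	for (i = 0; i < NCPUS; i++) {
		long long diff;

		if (i == self)
			continue;

		diff = b[i].rt_runtime - b[i].rt_time;  /* donor's spare budget */
		if (diff <= 0)
			continue;

		diff /= NCPUS;                          /* donate only a per-CPU share */
		if (b[self].rt_runtime + diff > rt_period)
			diff = rt_period - b[self].rt_runtime;

		b[i].rt_runtime    -= diff;
		b[self].rt_runtime += diff;
		more = 1;

		if (b[self].rt_runtime == rt_period)
			break;          /* a CPU can never hold more than one full period */
	}
	return more;
}

int main(void)
{
	long long period = 1000000000LL;                /* 1s period, in ns */
	struct budget b[NCPUS] = {
		{ 950000000LL, 950000000LL },           /* CPU0: budget exhausted */
		{ 950000000LL, 100000000LL },
		{ 950000000LL,          0LL },
		{ 950000000LL, 500000000LL },
	};

	if (borrow(b, 0, period))
		printf("CPU0 runtime after borrowing: %lld ns\n", b[0].rt_runtime);
	return 0;
}
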
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
        if (rt_rq)
@@ -197,12 +323,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);
 
+       if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+               return 0;
+
+#ifdef CONFIG_SMP
        if (rt_rq->rt_time > runtime) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+               int more;
 
-               rq->rt_throttled = 1;
-               rt_rq->rt_throttled = 1;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               more = balance_runtime(rt_rq);
+               spin_lock(&rt_rq->rt_runtime_lock);
+
+               if (more)
+                       runtime = sched_rt_runtime(rt_rq);
+       }
+#endif
 
+       if (rt_rq->rt_time > runtime) {
+               rt_rq->rt_throttled = 1;
                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
@@ -212,29 +350,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
        return 0;
 }
 
-static void update_sched_rt_period(struct rq *rq)
-{
-       struct rt_rq *rt_rq;
-       u64 period;
-
-       while (rq->clock > rq->rt_period_expire) {
-               period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
-               rq->rt_period_expire += period;
-
-               for_each_leaf_rt_rq(rt_rq, rq) {
-                       u64 runtime = sched_rt_runtime(rt_rq);
-
-                       rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
-                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-                               rt_rq->rt_throttled = 0;
-                               sched_rt_rq_enqueue(rt_rq);
-                       }
-               }
-
-               rq->rt_throttled = 0;
-       }
-}
-
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -259,9 +374,15 @@ static void update_curr_rt(struct rq *rq)
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
 
-       rt_rq->rt_time += delta_exec;
-       if (sched_rt_runtime_exceeded(rt_rq))
-               resched_task(curr);
+       for_each_sched_rt_entity(rt_se) {
+               rt_rq = rt_rq_of_se(rt_se);
+
+               spin_lock(&rt_rq->rt_runtime_lock);
+               rt_rq->rt_time += delta_exec;
+               if (sched_rt_runtime_exceeded(rt_rq))
+                       resched_task(curr);
+               spin_unlock(&rt_rq->rt_runtime_lock);
+       }
 }
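update_curr_rt() now charges delta_exec at every level of the task's group hierarchy rather than only at the leaf, because with CONFIG_RT_GROUP_SCHED each level carries its own rt_time/rt_runtime budget and can be throttled independently; for_each_sched_rt_entity() walks the entity's parent chain upward. A simplified sketch of that upward charging pass follows; the parent-linked struct is a stand-in for sched_rt_entity/rt_rq, introduced only for illustration.

#include <stdio.h>

/* One accounting level: the task's entity, its group, the group's parent, ...
 * A stand-in for the sched_rt_entity/rt_rq chain, for illustration only. */
struct level {
	const char *name;
	unsigned long long rt_time;     /* ns charged at this level */
	struct level *parent;
};

/* Charge delta_exec at every level, the way update_curr_rt() adds
 * delta_exec to each rt_rq along for_each_sched_rt_entity(). */
static void charge(struct level *l, unsigned long long delta_exec)
{
	for (; l; l = l->parent)
		l->rt_time += delta_exec;
}

int main(void)
{
	struct level root  = { "root",  0, NULL   };
	struct level group = { "group", 0, &root  };
	struct level task  = { "task",  0, &group };

	charge(&task, 250000);          /* 250us of RT execution */
	printf("%s=%llu %s=%llu %s=%llu\n",
	       task.name, task.rt_time,
	       group.name, group.rt_time,
	       root.name, root.rt_time);
	return 0;
}
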
 
 static inline
@@ -269,7 +390,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -281,9 +402,14 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;
+
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+#else
+       start_rt_bandwidth(&def_rt_bandwidth);
 #endif
 }
 
@@ -293,7 +419,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;
 
@@ -315,7 +441,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;
 
@@ -353,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- *      doesn't matter much for now, as h=2 for GROUP_SCHED.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-       struct sched_rt_entity *rt_se, *top_se;
+       struct sched_rt_entity *rt_se, *back = NULL;
 
-       /*
-        * dequeue all, top - down.
-        */
-       do {
-               rt_se = &p->rt;
-               top_se = NULL;
-               for_each_sched_rt_entity(rt_se) {
-                       if (on_rt_rq(rt_se))
-                               top_se = rt_se;
-               }
-               if (top_se)
-                       dequeue_rt_entity(top_se);
-       } while (top_se);
+       rt_se = &p->rt;
+       for_each_sched_rt_entity(rt_se) {
+               rt_se->back = back;
+               back = rt_se;
+       }
+
+       for (rt_se = back; rt_se; rt_se = rt_se->back) {
+               if (on_rt_rq(rt_se))
+                       dequeue_rt_entity(rt_se);
+       }
 }
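The rewritten dequeue_rt_stack() avoids the old repeated top-search: sched_rt_entity only links upward to its parent, so a first pass walks up recording a back pointer at each level, and a second pass follows the back pointers from the topmost entity downward, dequeueing top-down in a single linear walk. A small stand-alone sketch of that two-pass reversal follows; the types are illustrative, not the kernel's.

#include <stdio.h>

/* Entities only know their parent, just like sched_rt_entity; the 'back'
 * field is filled in during the upward pass. Illustrative types only. */
struct entity {
	const char *name;
	struct entity *parent;
	struct entity *back;
};

/* Visit the chain top-down even though links only point upward:
 * pass 1 records back pointers, pass 2 follows them from the top. */
static void walk_top_down(struct entity *leaf)
{
	struct entity *e, *back = NULL;

	for (e = leaf; e; e = e->parent) {      /* upward pass */
		e->back = back;
		back = e;
	}
	for (e = back; e; e = e->back)          /* downward pass, topmost first */
		printf("dequeue %s\n", e->name);
}

int main(void)
{
	struct entity top  = { "top",  NULL, NULL };
	struct entity mid  = { "mid",  &top, NULL };
	struct entity leaf = { "leaf", &mid, NULL };

	walk_top_down(&leaf);   /* prints: top, mid, leaf */
	return 0;
}
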
 
 /*
@@ -1005,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
        return 0;
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+                               const cpumask_t *new_mask)
 {
        int weight = cpus_weight(*new_mask);
 
@@ -1111,9 +1232,11 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
-                * then reschedule.
+                * then reschedule. Note, the above pull_rt_task
+                * can release the rq lock and p could migrate.
+                * Only reschedule if p is still on the same runqueue.
                 */
-               if (p->prio > rq->rt.highest_prio)
+               if (p->prio > rq->rt.highest_prio && rq->curr == p)
                        resched_task(p);
 #else
                /* For UP simply resched on drop of prio */