relayfs: support larger relay buffer
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 274b40d..c2730a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,14 +55,19 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return !list_empty(&rt_se->run_list);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
        if (!rt_rq->tg)
-               return SCHED_RT_FRAC;
+               return RUNTIME_INF;
 
-       return rt_rq->tg->rt_ratio;
+       return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -89,7 +94,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -102,7 +107,7 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
        }
 }
 
-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -110,11 +115,56 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
                dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = group_rt_rq(rt_se);
+       struct task_struct *p;
+
+       if (rt_rq)
+               return !!rt_rq->rt_nr_boosted;
+
+       p = rt_task_of(rt_se);
+       return p->prio != p->normal_prio;
+}
+
+#ifdef CONFIG_SMP
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_online_map;
+}
+#endif
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+       return &rt_rq->tg->rt_bandwidth;
+}
+
 #else
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-       return sysctl_sched_rt_ratio;
+       return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+       return ktime_to_ns(def_rt_bandwidth.rt_period);
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -141,19 +191,119 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
        return NULL;
 }
 
-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
+{
+}
+
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
+{
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
+}
+
+static inline cpumask_t sched_rt_period_mask(void)
+{
+       return cpu_online_map;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+       return &cpu_rq(cpu)->rt;
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 {
+       return &def_rt_bandwidth;
 }
 
-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+#endif
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
+       int i, idle = 1;
+       cpumask_t span;
+
+       if (rt_b->rt_runtime == RUNTIME_INF)
+               return 1;
+
+       span = sched_rt_period_mask();
+       for_each_cpu_mask(i, span) {
+               int enqueue = 0;
+               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               spin_lock(&rq->lock);
+               if (rt_rq->rt_time) {
+                       u64 runtime;
+
+                       spin_lock(&rt_rq->rt_runtime_lock);
+                       runtime = rt_rq->rt_runtime;
+                       rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+                               rt_rq->rt_throttled = 0;
+                               enqueue = 1;
+                       }
+                       if (rt_rq->rt_time || rt_rq->rt_nr_running)
+                               idle = 0;
+                       spin_unlock(&rt_rq->rt_runtime_lock);
+               }
+
+               if (enqueue)
+                       sched_rt_rq_enqueue(rt_rq);
+               spin_unlock(&rq->lock);
+       }
+
+       return idle;
 }
 
+#ifdef CONFIG_SMP
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+       struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+       int i, weight, more = 0;
+       u64 rt_period;
+
+       weight = cpus_weight(rd->span);
+
+       spin_lock(&rt_b->rt_runtime_lock);
+       rt_period = ktime_to_ns(rt_b->rt_period);
+       for_each_cpu_mask(i, rd->span) {
+               struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+               s64 diff;
+
+               if (iter == rt_rq)
+                       continue;
+
+               spin_lock(&iter->rt_runtime_lock);
+               diff = iter->rt_runtime - iter->rt_time;
+               if (diff > 0) {
+                       do_div(diff, weight);
+                       if (rt_rq->rt_runtime + diff > rt_period)
+                               diff = rt_period - rt_rq->rt_runtime;
+                       iter->rt_runtime -= diff;
+                       rt_rq->rt_runtime += diff;
+                       more = 1;
+                       if (rt_rq->rt_runtime == rt_period) {
+                               spin_unlock(&iter->rt_runtime_lock);
+                               break;
+                       }
+               }
+               spin_unlock(&iter->rt_runtime_lock);
+       }
+       spin_unlock(&rt_b->rt_runtime_lock);
+
+       return more;
+}
 #endif
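
balance_runtime() above lets a CPU that has exhausted its budget borrow spare runtime from the other rt_rqs in its root domain: each donor gives away at most its unused runtime divided by the domain weight, and the borrower is capped at one full period. A rough stand-alone illustration of that arithmetic with made-up numbers (hypothetical values, not the kernel code):

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical 4-CPU root domain, 1 s period (ns). CPU 0 has exhausted its
 * 950 ms budget; the other CPUs have spare runtime they can lend.
 */
int main(void)
{
	const uint64_t rt_period = 1000000000ULL;
	uint64_t runtime[4] = { 950000000ULL, 950000000ULL,
				950000000ULL, 950000000ULL };
	uint64_t rt_time[4]  = { 950000000ULL, 100000000ULL,
				 200000000ULL,         0ULL };
	int weight = 4, borrower = 0;

	for (int i = 0; i < 4; i++) {
		if (i == borrower)
			continue;

		int64_t diff = (int64_t)(runtime[i] - rt_time[i]);
		if (diff <= 0)
			continue;

		diff /= weight;                     /* donate a fair share only */
		if (runtime[borrower] + diff > rt_period)
			diff = rt_period - runtime[borrower];

		runtime[i] -= diff;
		runtime[borrower] += diff;
	}

	printf("cpu0 runtime after borrowing: %llu ns\n",
	       (unsigned long long)runtime[borrower]);
	return 0;
}

Here CPU 0's budget is topped up from 950 ms to the full 1 s period by the first donor; in the kernel the loop additionally stops early once that cap is reached.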
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
        if (rt_rq)
@@ -163,55 +313,41 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
        return rt_task_of(rt_se)->prio;
 }
 
-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
-       unsigned int rt_ratio = sched_rt_ratio(rt_rq);
-       u64 period, ratio;
+       u64 runtime = sched_rt_runtime(rt_rq);
 
-       if (rt_ratio == SCHED_RT_FRAC)
+       if (runtime == RUNTIME_INF)
                return 0;
 
        if (rt_rq->rt_throttled)
-               return 1;
+               return rt_rq_throttled(rt_rq);
 
-       period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-       ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+       if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+               return 0;
 
-       if (rt_rq->rt_time > ratio) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+#ifdef CONFIG_SMP
+       if (rt_rq->rt_time > runtime) {
+               int more;
 
-               rq->rt_throttled = 1;
-               rt_rq->rt_throttled = 1;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               more = balance_runtime(rt_rq);
+               spin_lock(&rt_rq->rt_runtime_lock);
 
-               sched_rt_ratio_dequeue(rt_rq);
-               return 1;
+               if (more)
+                       runtime = sched_rt_runtime(rt_rq);
        }
+#endif
 
-       return 0;
-}
-
-static void update_sched_rt_period(struct rq *rq)
-{
-       struct rt_rq *rt_rq;
-       u64 period;
-
-       while (rq->clock > rq->rt_period_expire) {
-               period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-               rq->rt_period_expire += period;
-
-               for_each_leaf_rt_rq(rt_rq, rq) {
-                       unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-                       u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-                       rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-                       if (rt_rq->rt_throttled) {
-                               rt_rq->rt_throttled = 0;
-                               sched_rt_ratio_enqueue(rt_rq);
-                       }
+       if (rt_rq->rt_time > runtime) {
+               rt_rq->rt_throttled = 1;
+               if (rt_rq_throttled(rt_rq)) {
+                       sched_rt_rq_dequeue(rt_rq);
+                       return 1;
                }
-
-               rq->rt_throttled = 0;
        }
+
+       return 0;
 }
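
One subtlety in the throttle decision above: with CONFIG_RT_GROUP_SCHED a group that is over budget only counts as throttled while it contains no priority-boosted tasks (rt_nr_boosted == 0), so boosting such as priority inheritance is never blocked by bandwidth control. A toy model of that check, assuming simplified field names:

#include <stdio.h>

struct toy_rt_rq {
	int rt_throttled;
	int rt_nr_boosted;   /* tasks running above their normal_prio */
};

/* Mirrors rt_rq_throttled() from the patch: boosting overrides throttling. */
static int toy_rt_rq_throttled(const struct toy_rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

int main(void)
{
	struct toy_rt_rq over_budget = { .rt_throttled = 1, .rt_nr_boosted = 0 };
	struct toy_rt_rq boosted     = { .rt_throttled = 1, .rt_nr_boosted = 1 };

	printf("plain over-budget group throttled:   %d\n",
	       toy_rt_rq_throttled(&over_budget));   /* 1: stays dequeued */
	printf("boosted over-budget group throttled: %d\n",
	       toy_rt_rq_throttled(&boosted));        /* 0: keeps running */
	return 0;
}

This is why rt_nr_boosted is maintained in inc_rt_tasks()/dec_rt_tasks() further down in the patch.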
 
 /*
@@ -238,14 +374,15 @@ static void update_curr_rt(struct rq *rq)
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
 
-       rt_rq->rt_time += delta_exec;
-       /*
-        * might make it a tad more accurate:
-        *
-        * update_sched_rt_period(rq);
-        */
-       if (sched_rt_ratio_exceeded(rt_rq))
-               resched_task(curr);
+       for_each_sched_rt_entity(rt_se) {
+               rt_rq = rt_rq_of_se(rt_se);
+
+               spin_lock(&rt_rq->rt_runtime_lock);
+               rt_rq->rt_time += delta_exec;
+               if (sched_rt_runtime_exceeded(rt_rq))
+                       resched_task(curr);
+               spin_unlock(&rt_rq->rt_runtime_lock);
+       }
 }
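
update_curr_rt() now charges the elapsed runtime at every level of the group hierarchy, taking each rt_rq's rt_runtime_lock in turn, so a child group and its parents consume budget together. A small sketch of that upward charging walk, using a hypothetical parent-linked structure instead of sched_rt_entity:

#include <stdio.h>
#include <stdint.h>

struct toy_rt_rq {
	const char *name;
	uint64_t rt_time;          /* runtime consumed this period (ns) */
	struct toy_rt_rq *parent;  /* NULL at the root */
};

/* Charge delta_exec to the rt_rq and every ancestor, leaf to root. */
static void toy_charge(struct toy_rt_rq *rt_rq, uint64_t delta_exec)
{
	for (; rt_rq; rt_rq = rt_rq->parent) {
		rt_rq->rt_time += delta_exec;
		/* the kernel would check sched_rt_runtime_exceeded() here */
	}
}

int main(void)
{
	struct toy_rt_rq root  = { "root",  0, NULL };
	struct toy_rt_rq group = { "group", 0, &root };

	toy_charge(&group, 1000000);   /* 1 ms of RT execution */
	printf("%s=%llu %s=%llu\n",
	       group.name, (unsigned long long)group.rt_time,
	       root.name,  (unsigned long long)root.rt_time);
	return 0;
}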
 
 static inline
@@ -253,7 +390,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -265,6 +402,15 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+#else
+       start_rt_bandwidth(&def_rt_bandwidth);
+#endif
 }
 
 static inline
@@ -273,7 +419,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;
 
@@ -295,6 +441,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted--;
+
+       WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -303,7 +455,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-       if (group_rq && group_rq->rt_throttled)
+       if (group_rq && rt_rq_throttled(group_rq))
                return;
 
        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -327,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- *      doesn't matter much for now, as h=2 for GROUP_SCHED.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-       struct sched_rt_entity *rt_se, *top_se;
+       struct sched_rt_entity *rt_se, *back = NULL;
 
-       /*
-        * dequeue all, top - down.
-        */
-       do {
-               rt_se = &p->rt;
-               top_se = NULL;
-               for_each_sched_rt_entity(rt_se) {
-                       if (on_rt_rq(rt_se))
-                               top_se = rt_se;
-               }
-               if (top_se)
-                       dequeue_rt_entity(top_se);
-       } while (top_se);
+       rt_se = &p->rt;
+       for_each_sched_rt_entity(rt_se) {
+               rt_se->back = back;
+               back = rt_se;
+       }
+
+       for (rt_se = back; rt_se; rt_se = rt_se->back) {
+               if (on_rt_rq(rt_se))
+                       dequeue_rt_entity(rt_se);
+       }
 }
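
The rewritten dequeue_rt_stack() avoids the old repeated scan: one upward pass threads ->back pointers through the hierarchy, then a second pass follows them from the topmost entity downward. The same two-pass idiom, stripped of scheduler details (hypothetical toy_entity type):

#include <stdio.h>
#include <stddef.h>

struct toy_entity {
	const char *name;
	struct toy_entity *parent;  /* walk-up link, like rt_se->parent */
	struct toy_entity *back;    /* filled in during the upward pass */
};

/* Visit a leaf-to-root chain in top-down order using one upward walk. */
static void visit_top_down(struct toy_entity *leaf)
{
	struct toy_entity *e, *back = NULL;

	/* Pass 1: walk up, threading back pointers. */
	for (e = leaf; e; e = e->parent) {
		e->back = back;
		back = e;
	}

	/* Pass 2: 'back' is now the topmost entity; follow ->back downward. */
	for (e = back; e; e = e->back)
		printf("dequeue %s\n", e->name);
}

int main(void)
{
	struct toy_entity root  = { "root",  NULL,   NULL };
	struct toy_entity group = { "group", &root,  NULL };
	struct toy_entity task  = { "task",  &group, NULL };

	visit_top_down(&task);   /* prints: root, group, task */
	return 0;
}

Compared with the removed loop, this is a single O(h) walk instead of rescanning from the task upward for every level, which is what the deleted "O(1/2 h^2)" comment referred to.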
 
 /*
@@ -496,7 +642,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;
 
-       if (sched_rt_ratio_exceeded(rt_rq))
+       if (rt_rq_throttled(rt_rq))
                return NULL;
 
        do {
@@ -979,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
        return 0;
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+                               const cpumask_t *new_mask)
 {
        int weight = cpus_weight(*new_mask);
 
@@ -1085,9 +1232,11 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
-                * then reschedule.
+                * then reschedule. Note, the above pull_rt_task
+                * can release the rq lock and p could migrate.
+                * Only reschedule if p is still on the same runqueue.
                 */
-               if (p->prio > rq->rt.highest_prio)
+               if (p->prio > rq->rt.highest_prio && rq->curr == p)
                        resched_task(p);
 #else
                /* For UP simply resched on drop of prio */