diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6928ded..c2730a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -374,11 +374,15 @@ static void update_curr_rt(struct rq *rq)
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
 
-       spin_lock(&rt_rq->rt_runtime_lock);
-       rt_rq->rt_time += delta_exec;
-       if (sched_rt_runtime_exceeded(rt_rq))
-               resched_task(curr);
-       spin_unlock(&rt_rq->rt_runtime_lock);
+       for_each_sched_rt_entity(rt_se) {
+               rt_rq = rt_rq_of_se(rt_se);
+
+               spin_lock(&rt_rq->rt_runtime_lock);
+               rt_rq->rt_time += delta_exec;
+               if (sched_rt_runtime_exceeded(rt_rq))
+                       resched_task(curr);
+               spin_unlock(&rt_rq->rt_runtime_lock);
+       }
 }
 
 static inline
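With RT group scheduling, the runtime an rt task consumes has to be charged to every rt_rq along its sched_rt_entity chain, not only the leaf it is queued on, which is why the hunk above wraps the accounting in for_each_sched_rt_entity(). Below is a minimal user-space sketch of that upward charge; struct level and charge_runtime() are hypothetical stand-ins for struct rt_rq and the kernel's walk along rt_se->parent, and the per-rt_rq locking is left out:

#include <stdio.h>

struct level {
        struct level *parent;           /* link toward the root of the hierarchy */
        unsigned long long rt_time;     /* runtime consumed this period          */
        unsigned long long rt_runtime;  /* runtime allowed per period            */
};

/* Charge delta to every level from the task's group up to the root. */
static int charge_runtime(struct level *se, unsigned long long delta)
{
        int resched = 0;

        for (; se; se = se->parent) {
                se->rt_time += delta;
                if (se->rt_time > se->rt_runtime)
                        resched = 1;    /* some level exceeded its budget */
        }
        return resched;
}

int main(void)
{
        struct level root = { NULL,  0, 950 };
        struct level grp  = { &root, 0, 400 };

        printf("resched=%d\n", charge_runtime(&grp, 500)); /* group over budget */
        return 0;
}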
@@ -475,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- *      doesn't matter much for now, as h=2 for GROUP_SCHED.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-       struct sched_rt_entity *rt_se, *top_se;
+       struct sched_rt_entity *rt_se, *back = NULL;
 
-       /*
-        * dequeue all, top - down.
-        */
-       do {
-               rt_se = &p->rt;
-               top_se = NULL;
-               for_each_sched_rt_entity(rt_se) {
-                       if (on_rt_rq(rt_se))
-                               top_se = rt_se;
-               }
-               if (top_se)
-                       dequeue_rt_entity(top_se);
-       } while (top_se);
+       rt_se = &p->rt;
+       for_each_sched_rt_entity(rt_se) {
+               rt_se->back = back;
+               back = rt_se;
+       }
+
+       for (rt_se = back; rt_se; rt_se = rt_se->back) {
+               if (on_rt_rq(rt_se))
+                       dequeue_rt_entity(rt_se);
+       }
 }
 
 /*
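The rewrite above replaces the old restart-from-the-bottom search, which the removed XXX comment pegged at O(1/2 h^2), with two linear passes: one walk up the ->parent chain that threads a ->back pointer through the entities, and one walk back down that pointer chain dequeueing top-down. A stand-alone sketch of the same two-pass idea, using illustrative names (struct node, dequeue_stack) rather than the kernel's:

#include <stdio.h>

struct node {
        struct node *parent;    /* link toward the top of the hierarchy        */
        struct node *back;      /* filled in on the way up, walked on the way down */
        int queued;
        const char *name;
};

static void dequeue(struct node *n)
{
        n->queued = 0;
        printf("dequeued %s\n", n->name);
}

static void dequeue_stack(struct node *task_se)
{
        struct node *se, *back = NULL;

        /* Pass 1: walk up, remembering the reverse (top-first) order. */
        for (se = task_se; se; se = se->parent) {
                se->back = back;
                back = se;
        }

        /* Pass 2: walk down from the top, dequeueing top-down. */
        for (se = back; se; se = se->back) {
                if (se->queued)
                        dequeue(se);
        }
}

int main(void)
{
        struct node root = { NULL,  NULL, 1, "root"  };
        struct node grp  = { &root, NULL, 1, "group" };
        struct node task = { &grp,  NULL, 1, "task"  };

        dequeue_stack(&task);   /* prints root, group, task in that order */
        return 0;
}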
@@ -515,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);
+
+       inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -534,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }
+
+       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
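Together with the inc_cpu_load() hunk in enqueue_task_rt() above, this makes rt tasks contribute their weight to the per-runqueue load, and the two calls must stay paired so the load returns to its previous value once the task is dequeued. A toy model of that bookkeeping follows; struct load, load_add() and load_sub() are stand-ins for what inc_cpu_load()/dec_cpu_load() do to rq->load, and 1024 is only an example weight:

#include <stdio.h>

struct load {
        unsigned long weight;
};

static void load_add(struct load *lw, unsigned long w) { lw->weight += w; }
static void load_sub(struct load *lw, unsigned long w) { lw->weight -= w; }

int main(void)
{
        struct load rq_load = { 0 };
        unsigned long task_weight = 1024;       /* stands in for p->se.load.weight */

        load_add(&rq_load, task_weight);        /* enqueue_task_rt() path */
        printf("after enqueue: %lu\n", rq_load.weight);

        load_sub(&rq_load, task_weight);        /* dequeue_task_rt() path */
        printf("after dequeue: %lu\n", rq_load.weight);
        return 0;
}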
@@ -1123,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
        return 0;
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+                               const cpumask_t *new_mask)
 {
        int weight = cpus_weight(*new_mask);
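The added const qualifier documents that set_cpus_allowed_rt() only reads the new affinity mask (as in the cpus_weight() call above) and never writes through new_mask. An illustrative stand-alone example of such a read-only weight count, with a toy mask_t and mask_weight() rather than the kernel's cpumask API:

#include <stdio.h>

typedef struct { unsigned long bits; } mask_t;

/* Count the CPUs set in the mask; const promises the caller's mask is untouched. */
static int mask_weight(const mask_t *mask)
{
        return __builtin_popcountl(mask->bits);
}

int main(void)
{
        const mask_t new_mask = { 0x5 };        /* CPUs 0 and 2 */

        printf("weight = %d\n", mask_weight(&new_mask));
        return 0;
}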