do_wait() wakeup optimization: shift security_task_wait() from eligible_child() to...
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 566e3bb..ecc637a 100644
@@ -513,6 +513,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);
 
+               trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
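
The first hunk instruments update_curr(): whenever the accumulated execution time of the current task is charged, a sched_stat_runtime trace event now fires with the task, the nanoseconds just accounted (delta_exec), and the entity's virtual runtime. For orientation, the event is declared in include/trace/events/sched.h roughly as follows; this is a sketch based on the mainline definition of that era, so treat the exact field layout as illustrative rather than a quote of the file:

TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,  comm,  TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64,   runtime )
		__field( u64,   vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid      = tsk->pid;
		__entry->runtime  = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

With the event enabled (echo 1 > /sys/kernel/debug/tracing/events/sched/sched_stat_runtime/enable), every accounting pass through update_curr() becomes visible in the trace buffer.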
@@ -1938,6 +1939,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+       struct sched_entity *se = &task->se;
+       unsigned long flags;
+       struct rq *rq;
+       unsigned int rr_interval = 0;
+
+       /*
+        * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+        * idle runqueue:
+        */
+       rq = task_rq_lock(task, &flags);
+       if (rq->cfs.load.weight)
+               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+       task_rq_unlock(rq, &flags);
+
+       return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
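
The new get_rr_interval_fair() gives SCHED_OTHER tasks a meaningful answer to sched_rr_get_interval(2): under the runqueue lock, if the CFS runqueue carries any load, the task's fair slice from sched_slice() (in nanoseconds) is converted to jiffies; on an otherwise idle runqueue the reported slice is 0, as the comment notes. A minimal userspace probe, assuming nothing beyond the standard libc wrapper:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means the calling thread; for a SCHED_OTHER task this
	 * now reports the CFS slice computed by get_rr_interval_fair(). */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

Note the granularity: NS_TO_JIFFIES() divides the nanosecond slice by the tick length, so on a HZ=250 kernel the reported interval moves in 4 ms steps.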
@@ -1966,6 +1986,8 @@ static const struct sched_class fair_sched_class = {
        .prio_changed           = prio_changed_fair,
        .switched_to            = switched_to_fair,
 
+       .get_rr_interval        = get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        .moved_group            = moved_group_fair,
 #endif
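
Registering .get_rr_interval in fair_sched_class lets sys_sched_rr_get_interval() dispatch through the scheduling class (time_slice = p->sched_class->get_rr_interval(p)) instead of hard-coding CFS details into the syscall. Each class answers in its own terms; for comparison, the real-time side of the same change looks roughly like this (reconstructed from the mainline commit of that era, so treat it as a sketch):

unsigned int get_rr_interval_rt(struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks, which run until they
	 * block or are preempted; only SCHED_RR has a fixed quantum.
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}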