schedstat: consolidate per-task cpu runtime stats
author Ken Chen <kenchen@google.com>
Wed, 17 Dec 2008 07:41:22 +0000 (23:41 -0800)
committer Ingo Molnar <mingo@elte.hu>
Thu, 18 Dec 2008 12:54:01 +0000 (13:54 +0100)
Impact: simplify code

When we turn on CONFIG_SCHEDSTATS, per-task cpu runtime is accumulated
twice: once in task->se.sum_exec_runtime and once in sched_info.cpu_time.
These two stats are exactly the same.

Given that task->se.sum_exec_runtime is always accumulated by the core
scheduler, sched_info can reuse that data instead of duplicating the
accounting.

Signed-off-by: Ken Chen <kenchen@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
fs/proc/base.c
include/linux/sched.h
kernel/delayacct.c
kernel/sched.c
kernel/sched_stats.h

diff --git a/fs/proc/base.c b/fs/proc/base.c
index d467760..4d745ba 100644
@@ -347,7 +347,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 {
        return sprintf(buffer, "%llu %llu %lu\n",
-                       task->sched_info.cpu_time,
+                       task->se.sum_exec_runtime,
                        task->sched_info.run_delay,
                        task->sched_info.pcount);
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cccd6d..2d1e840 100644
@@ -670,8 +670,7 @@ struct reclaim_state;
 struct sched_info {
        /* cumulative counters */
        unsigned long pcount;         /* # of times run on this cpu */
-       unsigned long long cpu_time,  /* time spent on the cpu */
-                          run_delay; /* time spent waiting on a runqueue */
+       unsigned long long run_delay; /* time spent waiting on a runqueue */
 
        /* timestamps */
        unsigned long long last_arrival,/* when we last ran on a cpu */
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index b3179da..abb6e17 100644
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
         */
        t1 = tsk->sched_info.pcount;
        t2 = tsk->sched_info.run_delay;
-       t3 = tsk->sched_info.cpu_time;
+       t3 = tsk->se.sum_exec_runtime;
 
        d->cpu_count += t1;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f53e2b8..fd835fc 100644
@@ -596,6 +596,8 @@ struct rq {
 #ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
+       unsigned long long rq_cpu_time;
+       /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
        /* sys_sched_yield() stats */
        unsigned int yld_exp_empty;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 7dbf72a..3b01098 100644
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
                    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
                    rq->sched_switch, rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
-                   rq->rq_sched_info.cpu_time,
+                   rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
                seq_printf(seq, "\n");
@@ -123,7 +123,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {
        if (rq)
-               rq->rq_sched_info.cpu_time += delta;
+               rq->rq_cpu_time += delta;
 }
 
 static inline void
@@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t)
        unsigned long long delta = task_rq(t)->clock -
                                        t->sched_info.last_arrival;
 
-       t->sched_info.cpu_time += delta;
        rq_sched_info_depart(task_rq(t), delta);
 
        if (t->state == TASK_RUNNING)