sched: fix ideal_runtime calculations for reniced tasks
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 5 Sep 2007 12:32:49 +0000 (14:32 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 5 Sep 2007 12:32:49 +0000 (14:32 +0200)
fix ideal_runtime:

  - do not scale it using niced_granularity():
    it is compared against the sum_exec_delta, so it's wall-time, not fair-time.

  - move the whole check into __check_preempt_curr_fair()
    so that wakeup preemption can also benefit from the new logic
    (a simplified sketch of the resulting check follows this list).
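
for illustration: with sysctl_sched_latency at, say, 20 ms and 4 runnable
tasks, ideal_runtime is 5 ms of wall-time regardless of nice level; the old
code scaled that wall-time slice with niced_granularity() before comparing
it against the (also wall-time) delta_exec. a minimal sketch of the check as
it ends up after this patch (the function name here is made up; the fields,
sysctls and helpers are the ones visible in the diff below):

  static void
  check_preempt_sketch(struct cfs_rq *cfs_rq, struct sched_entity *se,
                       struct sched_entity *curr, unsigned long granularity)
  {
          s64 __delta = curr->fair_key - se->fair_key;
          unsigned long ideal_runtime, delta_exec;

          /* wall-time slice: the latency period split among runnable tasks */
          ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
                          (unsigned long)sysctl_sched_min_granularity);

          /* delta_exec is wall-time too, so compare it unscaled */
          delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
          if (delta_exec > ideal_runtime)
                  granularity = 0;

          /*
           * the key comparison is in fair-clock units, so only the
           * granularity used there is scaled by the nice level
           */
          if (__delta > niced_granularity(curr, granularity))
                  resched_task(rq_of(cfs_rq)->curr);
  }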

this also results in a code size reduction:

   text    data     bss     dec     hex filename
  13391     228    1204   14823    39e7 sched.o.before
  13369     228    1204   14801    39d1 sched.o.after

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d01bbc..892616b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
 {
        s64 __delta = curr->fair_key - se->fair_key;
+       unsigned long ideal_runtime, delta_exec;
+
+       /*
+        * ideal_runtime is compared against sum_exec_runtime, which is
+        * walltime, hence do not scale.
+        */
+       ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+                       (unsigned long)sysctl_sched_min_granularity);
+
+       /*
+        * If we executed more than what the latency constraint suggests,
+        * reduce the rescheduling granularity. This way the total latency
+        * of how much a task is not scheduled converges to
+        * sysctl_sched_latency:
+        */
+       delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+       if (delta_exec > ideal_runtime)
+               granularity = 0;
 
        /*
         * Take scheduling granularity into account - do not
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
+        *
+        * scale granularity as key space is in fair_clock.
         */
        if (__delta > niced_granularity(curr, granularity))
                resched_task(rq_of(cfs_rq)->curr);
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-       unsigned long gran, ideal_runtime, delta_exec;
        struct sched_entity *next;
 
        /*
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        if (next == curr)
                return;
 
-       gran = sched_granularity(cfs_rq);
-       ideal_runtime = niced_granularity(curr,
-               max(sysctl_sched_latency / cfs_rq->nr_running,
-                   (unsigned long)sysctl_sched_min_granularity));
-       /*
-        * If we executed more than what the latency constraint suggests,
-        * reduce the rescheduling granularity. This way the total latency
-        * of how much a task is not scheduled converges to
-        * sysctl_sched_latency:
-        */
-       delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-       if (delta_exec > ideal_runtime)
-               gran = 0;
-
-       __check_preempt_curr_fair(cfs_rq, next, curr, gran);
+       __check_preempt_curr_fair(cfs_rq, next, curr,
+                       sched_granularity(cfs_rq));
 }
 
 /**************************************************