trace: set max latency variable to zero on default
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440..5cc1c16 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                   struct sched_entity,
                                                   run_node);
 
-               if (vruntime == cfs_rq->min_vruntime)
+               if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
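
The hunk above changes the test from "the candidate still equals the cached min_vruntime" to "there is no current entity". The old test misfires when the running entity's vruntime happens to equal min_vruntime: the leftmost queued entity is then taken unconditionally and min_vruntime can jump ahead of the running task. Below is a minimal userspace model of the fixed decision, assuming the surrounding function has the usual update_min_vruntime() shape; all names are invented and the wrap-safe helpers mirror min_vruntime()/max_vruntime() from this file.

#include <stdint.h>
#include <stdio.h>

struct toy_cfs_rq {
        uint64_t min_vruntime;   /* monotonic per-queue clock */
        uint64_t curr_vruntime;  /* vruntime of the running entity */
        int      has_curr;       /* is something currently running? */
        uint64_t left_vruntime;  /* vruntime of the leftmost queued entity */
        int      has_left;       /* is the rbtree non-empty? */
};

/* wraparound-safe comparisons, like min_vruntime()/max_vruntime() */
static uint64_t toy_min(uint64_t a, uint64_t b)
{
        return ((int64_t)(a - b) < 0) ? a : b;
}

static uint64_t toy_max(uint64_t a, uint64_t b)
{
        return ((int64_t)(a - b) > 0) ? a : b;
}

static void toy_update_min_vruntime(struct toy_cfs_rq *q)
{
        uint64_t vruntime = q->min_vruntime;

        if (q->has_curr)
                vruntime = q->curr_vruntime;

        if (q->has_left) {
                if (!q->has_curr)               /* the fixed test */
                        vruntime = q->left_vruntime;
                else
                        vruntime = toy_min(vruntime, q->left_vruntime);
        }

        /* never move the queue clock backwards */
        q->min_vruntime = toy_max(q->min_vruntime, vruntime);
}

int main(void)
{
        /* curr's vruntime equals min_vruntime: the old test would read this
         * as "no current task" and let the leftmost entity (140) win. */
        struct toy_cfs_rq q = { 100, 100, 1, 140, 1 };

        toy_update_min_vruntime(&q);
        printf("min_vruntime = %llu\n", (unsigned long long)q.min_vruntime);
        return 0;
}
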
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-       for_each_sched_entity(se) {
-               delta = calc_delta_mine(delta,
-                               se->load.weight, &cfs_rq_of(se)->load);
-       }
-
-       return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +426,23 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       unsigned long nr_running = cfs_rq->nr_running;
+       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-       if (unlikely(!se->on_rq))
-               nr_running++;
+       for_each_sched_entity(se) {
+               struct load_weight *load;
 
-       return calc_delta_weight(__sched_period(nr_running), se);
+               cfs_rq = cfs_rq_of(se);
+               load = &cfs_rq->load;
+
+               if (unlikely(!se->on_rq)) {
+                       struct load_weight lw = cfs_rq->load;
+
+                       update_load_add(&lw, se->load.weight);
+                       load = &lw;
+               }
+               slice = calc_delta_mine(slice, se->load.weight, load);
+       }
+       return slice;
 }
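
The removed calc_delta_weight() walked the entity hierarchy applying delta *= P[w/rw]; the rewritten sched_slice() folds that walk in directly and, when the entity is not yet enqueued, adds its weight to a local copy of the queue load so the slice reflects the load after enqueue. The arithmetic it performs at each level is roughly slice = slice * w_se / w_rq. A small userspace sketch of that math follows; the flat arrays, the 20 ms period, and the single-level on_rq adjustment are simplifications of for_each_sched_entity() and calc_delta_mine(), not the kernel's fixed-point implementation.

#include <stdio.h>

#define NLEVELS 2

int main(void)
{
        unsigned long period = 20000000UL;              /* example period, ns */
        unsigned long se_weight[NLEVELS] = { 1024, 2048 };
        unsigned long rq_weight[NLEVELS] = { 3072, 4096 };
        int on_rq = 0;                                  /* entity not queued yet */
        unsigned long slice = period;
        int i;

        for (i = 0; i < NLEVELS; i++) {
                unsigned long rw = rq_weight[i];

                /* account for the weight about to be added, as the
                 * !se->on_rq branch above does with a local load_weight */
                if (i == 0 && !on_rq)
                        rw += se_weight[i];

                slice = (unsigned long long)slice * se_weight[i] / rw;
        }

        printf("slice = %lu ns\n", slice);
        return 0;
}
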
 
 /*
@@ -683,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                        unsigned long thresh = sysctl_sched_latency;
 
                        /*
-                        * convert the sleeper threshold into virtual time
+                        * Convert the sleeper threshold into virtual time.
+                        * SCHED_IDLE is a special sub-class.  We care about
+                        * fairness only relative to other SCHED_IDLE tasks,
+                        * all of which have the same weight.
                         */
-                       if (sched_feat(NORMALIZED_SLEEPER))
+                       if (sched_feat(NORMALIZED_SLEEPER) &&
+                                       task_of(se)->policy != SCHED_IDLE)
                                thresh = calc_delta_fair(thresh, se);
 
                        vruntime -= thresh;
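
calc_delta_fair() scales the wall-clock sleeper threshold into the entity's virtual time, roughly delta * NICE_0_LOAD / se->load.weight, so a very light entity would receive an enormous vruntime credit on wakeup. Since SCHED_IDLE entities carry a tiny weight and only need fairness relative to other SCHED_IDLE tasks, the scaling is skipped for them. A quick userspace sketch of the scaling, with an assumed illustrative idle-class weight of 2:

#include <stdio.h>

#define NICE_0_LOAD 1024UL

/* approximate calc_delta_fair(): wall time -> virtual time for a weight */
static unsigned long long scale_to_vtime(unsigned long delta, unsigned long weight)
{
        return (unsigned long long)delta * NICE_0_LOAD / weight;
}

int main(void)
{
        unsigned long thresh = 20000000UL;      /* e.g. sysctl_sched_latency, 20ms */

        printf("nice 0 weight 1024        : %llu ns\n", scale_to_vtime(thresh, 1024));
        printf("idle-like weight 2, scaled: %llu ns\n", scale_to_vtime(thresh, 2));
        return 0;
}
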
@@ -1019,16 +1020,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-       cpumask_t tmp;
        struct sched_domain *sd;
        int i;
+       unsigned int chosen_wakeup_cpu;
+       int this_cpu;
+
+       /*
+        * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+        * are idle and this is not a kernel thread and this task's affinity
+        * allows it to be moved to preferred cpu, then just move!
+        */
+
+       this_cpu = smp_processor_id();
+       chosen_wakeup_cpu =
+               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+               idle_cpu(cpu) && idle_cpu(this_cpu) &&
+               p->mm && !(p->flags & PF_KTHREAD) &&
+               cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+               return chosen_wakeup_cpu;
 
        /*
         * If it is idle, then it is the best cpu to run this task.
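
The new early exit redirects the wakeup: at the POWERSAVINGS_BALANCE_WAKEUP level, if both the previous CPU and the waking CPU are idle and the user task's affinity permits it, the task is placed on the root domain's preferred wakeup CPU so other packages can stay idle. A userspace model of that decision follows; the struct, field names, and the level value 2 are local to the example, while the kernel reads the preferred CPU from rd->sched_mc_preferred_wakeup_cpu as shown above.

#include <stdbool.h>
#include <stdio.h>

#define TOY_BALANCE_WAKEUP 2    /* illustrative power-savings level */

struct wake_ctx {
        int power_savings_level;
        bool prev_cpu_idle;
        bool this_cpu_idle;
        bool is_kernel_thread;
        unsigned long cpus_allowed;     /* bitmask of allowed CPUs */
        int preferred_wakeup_cpu;
};

/* return the CPU to wake on, or -1 to fall through to the normal search */
static int powersave_redirect(const struct wake_ctx *c)
{
        if (c->power_savings_level >= TOY_BALANCE_WAKEUP &&
            c->prev_cpu_idle && c->this_cpu_idle &&
            !c->is_kernel_thread &&
            (c->cpus_allowed & (1UL << c->preferred_wakeup_cpu)))
                return c->preferred_wakeup_cpu;
        return -1;
}

int main(void)
{
        struct wake_ctx c = {
                .power_savings_level = 2,
                .prev_cpu_idle = true,
                .this_cpu_idle = true,
                .is_kernel_thread = false,
                .cpus_allowed = 0xfUL,          /* CPUs 0-3 */
                .preferred_wakeup_cpu = 3,
        };

        printf("wake on CPU %d\n", powersave_redirect(&c));
        return 0;
}
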
@@ -1046,10 +1064,9 @@ static int wake_idle(int cpu, struct task_struct *p)
                if ((sd->flags & SD_WAKE_IDLE)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
-                       cpus_and(tmp, sd->span, p->cpus_allowed);
-                       cpus_and(tmp, tmp, cpu_active_map);
-                       for_each_cpu_mask_nr(i, tmp) {
-                               if (idle_cpu(i)) {
+                       for_each_cpu_and(i, sched_domain_span(sd),
+                                        &p->cpus_allowed) {
+                               if (cpu_active(i) && idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
                                                       se.nr_wakeups_idle);
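
This hunk drops the on-stack cpumask_t (costly with large NR_CPUS) and iterates the intersection directly with for_each_cpu_and(), checking cpu_active() per CPU instead of pre-masking with the active map. The same pattern in plain C, with an unsigned long standing in for struct cpumask (so at most a handful of CPUs) and a local macro standing in for the kernel iterator:

#include <stdio.h>

/* local stand-in for iterating the set bits common to two masks */
#define for_each_set_bit_and(i, a, b, nbits)                    \
        for ((i) = 0; (i) < (nbits); (i)++)                     \
                if (((a) & (b)) & (1UL << (i)))

int main(void)
{
        unsigned long domain_span  = 0xffUL;    /* CPUs 0-7 */
        unsigned long cpus_allowed = 0x5aUL;    /* CPUs 1, 3, 4, 6 */
        unsigned long cpu_active   = 0xfbUL;    /* CPU 2 is going down */
        int i;

        for_each_set_bit_and(i, domain_span, cpus_allowed, 8)
                if (cpu_active & (1UL << i))
                        printf("candidate CPU %d\n", i);

        return 0;
}
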
@@ -1242,13 +1259,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         * this_cpu and prev_cpu are present in:
         */
        for_each_domain(this_cpu, sd) {
-               if (cpu_isset(prev_cpu, sd->span)) {
+               if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
                        this_sd = sd;
                        break;
                }
        }
 
-       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+       if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
                goto out;
 
        /*
@@ -1330,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->last = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->last = se;
+       }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->next = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->next = se;
+       }
 }
 
 /*
@@ -1383,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                return;
 
        /*
-        * Batch tasks do not preempt (their preemption is driven by
+        * Batch and idle tasks do not preempt (their preemption is driven by
         * the tick):
         */
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
+       /* Idle tasks are by definition preempted by everybody. */
+       if (unlikely(curr->policy == SCHED_IDLE)) {
+               resched_task(curr);
+               return;
+       }
+
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
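
The ordering of the new checks matters: a waking batch or idle task never preempts (it waits for the tick), while a waking normal task always preempts a running SCHED_IDLE task before the usual vruntime comparison is even reached. A pure decision-function sketch of just those two checks, with local enum and helper names:

#include <stdbool.h>
#include <stdio.h>

enum toy_policy { TOY_NORMAL, TOY_BATCH, TOY_IDLE };

/* should the waking task 'wakee' preempt the running task 'curr'? */
static bool toy_wakeup_preempt(enum toy_policy curr, enum toy_policy wakee)
{
        /* batch and idle wakers never preempt; they wait for the tick */
        if (wakee != TOY_NORMAL)
                return false;

        /* anything normal preempts an idle-policy task immediately */
        if (curr == TOY_IDLE)
                return true;

        /* ...the vruntime-based check would follow here... */
        return false;
}

int main(void)
{
        printf("normal wakes over idle : %d\n", toy_wakeup_preempt(TOY_IDLE, TOY_NORMAL));
        printf("idle wakes over normal : %d\n", toy_wakeup_preempt(TOY_NORMAL, TOY_IDLE));
        return 0;
}
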
 
@@ -1607,8 +1634,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
        }
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
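
The file-local swap() macro is removed, presumably because an equivalent generic helper is now available from a common header. For reference, the typeof trick it relied on works standalone (GCC/Clang extension):

#include <stdio.h>

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
        int x = 1, y = 2;

        swap(x, y);
        printf("x=%d y=%d\n", x, y);
        return 0;
}
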
-
 /*
  * Share the fairness runtime between parent and child, thus the
  * total amount of pressure for CPU stays equal - new tasks