sched: Complete buddy switches

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 56c0efe..4f6356e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -24,20 +24,20 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
  * (to see the precise effective timeslice length of your workload,
  *  run vmstat and monitor the context-switches (cs) field)
  */
-unsigned int sysctl_sched_latency = 20000000ULL;
+unsigned int sysctl_sched_latency = 5000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 4000000ULL;
+unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
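
The new defaults keep the latency/granularity ratio at 5, matching the sched_nr_latency value kept below. A quick sanity check of the arithmetic (hypothetical userspace sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long latency  = 5000000ULL;	/* new sysctl_sched_latency, ns */
	unsigned long long min_gran = 1000000ULL;	/* new sysctl_sched_min_granularity, ns */

	/*
	 * nr_latency: how many runnable tasks fit into one latency period
	 * before the period has to stretch by min_gran per extra task.
	 */
	printf("nr_latency = %llu\n", latency / min_gran);	/* 5 */
	return 0;
}
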
@@ -48,10 +48,10 @@ unsigned int sysctl_sched_min_granularity = 4000000ULL;
 static unsigned int sched_nr_latency = 5;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
+ * After fork, child runs first. If set to 0 (default) then
  * parent will (try to) run first.
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
  * sys_sched_yield() compat mode
@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
@@ -79,11 +79,6 @@ static const struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */
 
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-       return container_of(se, struct task_struct, se);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -95,6 +90,14 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)     (!se->my_q)
 
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       WARN_ON_ONCE(!entity_is_task(se));
+#endif
+       return container_of(se, struct task_struct, se);
+}
+
 /* Walk up scheduling entities hierarchy */
 #define for_each_sched_entity(se) \
                for (; se; se = se->parent)
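
For background, a hypothetical userspace illustration (simplified struct layouts, nothing from the kernel headers) of what task_of() relies on and why the new WARN_ON_ONCE(!entity_is_task(se)) is useful: container_of() merely subtracts the member offset, so applying it to a group entity that is not embedded in a task_struct would yield a bogus pointer.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity { unsigned long long vruntime; };
struct task_struct  { int pid; struct sched_entity se; };

int main(void)
{
	struct task_struct t = { .pid = 42 };
	struct sched_entity *se = &t.se;

	/* Fine: this entity really is embedded in a task_struct. */
	printf("pid = %d\n", container_of(se, struct task_struct, se)->pid);
	return 0;
}
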
@@ -186,7 +189,12 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
        }
 }
 
-#else  /* CONFIG_FAIR_GROUP_SCHED */
+#else  /* !CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+       return container_of(se, struct task_struct, se);
+}
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
@@ -266,6 +274,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
        return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+                               struct sched_entity *b)
+{
+       return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        return se->vruntime - cfs_rq->min_vruntime;
@@ -283,7 +297,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                   struct sched_entity,
                                                   run_node);
 
-               if (vruntime == cfs_rq->min_vruntime)
+               if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
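
The new entity_before() compares vruntimes through a signed difference so the ordering stays correct even once the u64 counters wrap around; a small hypothetical userspace demo of the idiom:

#include <stdio.h>
#include <stdint.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* same idiom as entity_before() */
}

int main(void)
{
	uint64_t a = UINT64_MAX - 100;	/* just below the wrap point */
	uint64_t b = a + 200;		/* wrapped past zero */

	/* A naive '<' gets this wrong; the signed difference does not. */
	printf("naive a < b: %d, before(a, b): %d\n", (int)(a < b), before(a, b));
	return 0;
}
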
@@ -386,20 +400,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-       for_each_sched_entity(se) {
-               delta = calc_delta_mine(delta,
-                               se->load.weight, &cfs_rq_of(se)->load);
-       }
-
-       return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +440,24 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       unsigned long nr_running = cfs_rq->nr_running;
+       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-       if (unlikely(!se->on_rq))
-               nr_running++;
+       for_each_sched_entity(se) {
+               struct load_weight *load;
+               struct load_weight lw;
+
+               cfs_rq = cfs_rq_of(se);
+               load = &cfs_rq->load;
 
-       return calc_delta_weight(__sched_period(nr_running), se);
+               if (unlikely(!se->on_rq)) {
+                       lw = cfs_rq->load;
+
+                       update_load_add(&lw, se->load.weight);
+                       load = &lw;
+               }
+               slice = calc_delta_mine(slice, se->load.weight, load);
+       }
+       return slice;
 }
 
 /*
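
Rough worked example of what the rewritten sched_slice() loop computes, as a hypothetical userspace sketch: calc_delta_mine() is reduced to a plain weight ratio, the group hierarchy is ignored, and the usual nice-0 weight of 1024 is assumed.

#include <stdio.h>

/* Stand-in for calc_delta_mine(): scale the slice by weight / queue load. */
static unsigned long long scale(unsigned long long slice,
				unsigned long weight,
				unsigned long queue_load)
{
	return slice * weight / queue_load;
}

int main(void)
{
	unsigned long long period = 5000000ULL;	/* 5 ms: __sched_period() with few tasks */
	unsigned long w = 1024;			/* assumed nice-0 weight */

	/* Two nice-0 tasks already on the queue: each gets half the period. */
	printf("on-rq slice:  %llu ns\n", scale(period, w, 2 * w));

	/*
	 * A third task that is not yet on the runqueue: its own weight is
	 * added to a local copy of the load before dividing, as the patch
	 * does for the !se->on_rq case.
	 */
	printf("off-rq slice: %llu ns\n", scale(period, w, 2 * w + w));
	return 0;
}
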
@@ -533,6 +545,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
        schedstat_set(se->wait_count, se->wait_count + 1);
        schedstat_set(se->wait_sum, se->wait_sum +
                        rq_of(cfs_rq)->clock - se->wait_start);
+#ifdef CONFIG_SCHEDSTATS
+       if (entity_is_task(se)) {
+               trace_sched_stat_wait(task_of(se),
+                       rq_of(cfs_rq)->clock - se->wait_start);
+       }
+#endif
        schedstat_set(se->wait_start, 0);
 }
 
@@ -607,9 +625,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+       struct task_struct *tsk = NULL;
+
+       if (entity_is_task(se))
+               tsk = task_of(se);
+
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -620,11 +642,13 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
 
-               account_scheduler_latency(tsk, delta >> 10, 1);
+               if (tsk) {
+                       account_scheduler_latency(tsk, delta >> 10, 1);
+                       trace_sched_stat_sleep(tsk, delta);
+               }
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -635,17 +659,25 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                se->block_start = 0;
                se->sum_sleep_runtime += delta;
 
-               /*
-                * Blocking time is in units of nanosecs, so shift by 20 to
-                * get a milliseconds-range estimation of the amount of
-                * time that the task spent sleeping:
-                */
-               if (unlikely(prof_on == SLEEP_PROFILING)) {
+               if (tsk) {
+                       if (tsk->in_iowait) {
+                               se->iowait_sum += delta;
+                               se->iowait_count++;
+                               trace_sched_stat_iowait(tsk, delta);
+                       }
 
-                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-                                    delta >> 20);
+                       /*
+                        * Blocking time is in units of nanosecs, so shift by
+                        * 20 to get a milliseconds-range estimation of the
+                        * amount of time that the task spent sleeping:
+                        */
+                       if (unlikely(prof_on == SLEEP_PROFILING)) {
+                               profile_hits(SLEEP_PROFILING,
+                                               (void *)get_wchan(tsk),
+                                               delta >> 20);
+                       }
+                       account_scheduler_latency(tsk, delta >> 10, 0);
                }
-               account_scheduler_latency(tsk, delta >> 10, 0);
        }
 #endif
 }
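
The shifts used above are cheap approximations of unit conversions; a tiny hypothetical demo of what they correspond to:

#include <stdio.h>

int main(void)
{
	unsigned long long delta = 3000000ULL;	/* a 3 ms block, in ns */

	/*
	 * >> 20 divides by ~1.05e6: a rough milliseconds figure for the
	 * sleep profiler; >> 10 divides by 1024: roughly microseconds for
	 * account_scheduler_latency().
	 */
	printf("delta >> 20 = %llu (~ms)\n", delta >> 20);
	printf("delta >> 10 = %llu (~us)\n", delta >> 10);
	return 0;
}
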
@@ -683,18 +715,23 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                        unsigned long thresh = sysctl_sched_latency;
 
                        /*
-                        * convert the sleeper threshold into virtual time
+                        * Convert the sleeper threshold into virtual time.
+                        * SCHED_IDLE is a special sub-class.  We care about
+                        * fairness only relative to other SCHED_IDLE tasks,
+                        * all of which have the same weight.
                         */
-                       if (sched_feat(NORMALIZED_SLEEPER))
+                       if (sched_feat(NORMALIZED_SLEEPER) &&
+                                       (!entity_is_task(se) ||
+                                        task_of(se)->policy != SCHED_IDLE))
                                thresh = calc_delta_fair(thresh, se);
 
                        vruntime -= thresh;
                }
-
-               /* ensure we never gain time by being placed backwards. */
-               vruntime = max_vruntime(se->vruntime, vruntime);
        }
 
+       /* ensure we never gain time by being placed backwards. */
+       vruntime = max_vruntime(se->vruntime, vruntime);
+
        se->vruntime = vruntime;
 }
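
With the max_vruntime() clamp now applied to every placement rather than only the sleeper path, a waking task can receive the sleeper credit but can never be placed behind the vruntime it already accumulated. Hypothetical numbers (userspace sketch, wrap-safe max as in CFS):

#include <stdio.h>

static unsigned long long max_vruntime(unsigned long long a, unsigned long long b)
{
	return (long long)(b - a) > 0 ? b : a;	/* wrap-safe max */
}

int main(void)
{
	unsigned long long min_vruntime = 10000000ULL;	/* queue floor */
	unsigned long long thresh       = 5000000ULL;	/* new default latency */
	unsigned long long se_vruntime  = 9000000ULL;	/* task's old vruntime */

	unsigned long long vruntime = min_vruntime - thresh;	/* sleeper credit */

	vruntime = max_vruntime(se_vruntime, vruntime);
	printf("placed at %llu\n", vruntime);	/* 9000000, not 5000000 */
	return 0;
}
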
 
@@ -718,7 +755,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
                __enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        if (cfs_rq->last == se)
                cfs_rq->last = NULL;
@@ -727,6 +764,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
                cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       for_each_sched_entity(se)
+               __clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -767,8 +810,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-       if (delta_exec > ideal_runtime)
+       if (delta_exec > ideal_runtime) {
                resched_task(rq_of(cfs_rq)->curr);
+               /*
+                * The current task ran long enough, ensure it doesn't get
+                * re-elected due to buddy favours.
+                */
+               clear_buddies(cfs_rq, curr);
+       }
 }
 
 static void
@@ -1002,7 +1051,7 @@ static void yield_task_fair(struct rq *rq)
        /*
         * Already in the rightmost position?
         */
-       if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+       if (unlikely(!rightmost || entity_before(rightmost, se)))
                return;
 
        /*
@@ -1019,17 +1068,21 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
+ * hence we need to mask them out (rq->rd->online)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+
+#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
+
 static int wake_idle(int cpu, struct task_struct *p)
 {
        struct sched_domain *sd;
        int i;
        unsigned int chosen_wakeup_cpu;
        int this_cpu;
+       struct rq *task_rq = task_rq(p);
 
        /*
         * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1062,10 +1115,10 @@ static int wake_idle(int cpu, struct task_struct *p)
        for_each_domain(cpu, sd) {
                if ((sd->flags & SD_WAKE_IDLE)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
-                       && !task_hot(p, task_rq(p)->clock, sd))) {
+                       && !task_hot(p, task_rq->clock, sd))) {
                        for_each_cpu_and(i, sched_domain_span(sd),
                                         &p->cpus_allowed) {
-                               if (cpu_active(i) && idle_cpu(i)) {
+                               if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
                                                       se.nr_wakeups_idle);
@@ -1208,7 +1261,17 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
        tg = task_group(p);
        weight = p->se.load.weight;
 
-       balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+       /*
+        * In low-load situations, where prev_cpu is idle and this_cpu is idle
+        * due to the sync cause above having dropped tl to 0, we'll always have
+        * an imbalance, but there's really nothing you can do about that, so
+        * that's good too.
+        *
+        * Otherwise check whether either cpu is near enough in load to allow this
+        * task to be woken on this_cpu.
+        */
+       balanced = !tl ||
+               100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
        /*
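
A hypothetical numeric check of the new condition, with the effective_load() group corrections ignored and an imbalance factor of roughly 105 (about 5% slack) assumed:

#include <stdio.h>

static int balanced(unsigned long tl, unsigned long tl_add,
		    unsigned long load, unsigned long imbalance)
{
	/* Either this_cpu is idle (tl == 0), or it is close enough in load. */
	return !tl || 100 * (tl + tl_add) <= imbalance * load;
}

int main(void)
{
	/* Idle this_cpu after the sync discount: trivially balanced. */
	printf("idle case:   %d\n", balanced(0, 1024, 2048, 105));

	/* Loaded this_cpu: 100 * (2048 + 1024) > 105 * 2048, not balanced. */
	printf("loaded case: %d\n", balanced(2048, 1024, 2048, 105));
	return 0;
}
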
@@ -1251,8 +1314,6 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        this_rq         = cpu_rq(this_cpu);
        new_cpu         = prev_cpu;
 
-       if (prev_cpu == this_cpu)
-               goto out;
        /*
         * 'this_sd' is the first domain that both
         * this_cpu and prev_cpu are present in:
@@ -1301,16 +1362,63 @@ out:
 }
 #endif /* CONFIG_SMP */
 
-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ *       degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+       u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+       u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+       u64 gran = 0;
+
+       if (this_run < expected_wakeup)
+               gran = expected_wakeup - this_run;
+
+       return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
        unsigned long gran = sysctl_sched_wakeup_granularity;
 
+       if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+               gran = adaptive_gran(curr, se);
+
        /*
-        * More easily preempt - nice tasks, while not making it harder for
-        * + nice tasks.
+        * Since it is curr that is running now, convert the gran from
+        * real-time to virtual-time in its units.
         */
-       if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-               gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+       if (sched_feat(ASYM_GRAN)) {
+               /*
+                * By using 'se' instead of 'curr' we penalize light tasks, so
+                * they get preempted easier. That is, if 'se' < 'curr' then
+                * the resulting gran will be larger, therefore penalizing the
+                * lighter, if otoh 'se' > 'curr' then the resulting gran will
+                * be smaller, again penalizing the lighter task.
+                *
+                * This is especially important for buddies when the leftmost
+                * task is higher priority than the buddy.
+                */
+               if (unlikely(se->load.weight != NICE_0_LOAD))
+                       gran = calc_delta_fair(gran, se);
+       } else {
+               if (unlikely(curr->load.weight != NICE_0_LOAD))
+                       gran = calc_delta_fair(gran, curr);
+       }
 
        return gran;
 }
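
Hypothetical walk-through of adaptive_gran() with made-up numbers: avg_wakeup = 200 us, two runnable tasks, and the current task has run 300 us since its last switch, all clamped to the new 1 ms wakeup granularity:

#include <stdio.h>

int main(void)
{
	unsigned long long avg_wakeup = 200000ULL;	/* 200 us, assumed */
	unsigned long long this_run   = 300000ULL;	/* run since last switch */
	unsigned long nr_running      = 2;
	unsigned long long limit      = 1000000ULL;	/* new default wakeup gran */

	unsigned long long expected = 2 * avg_wakeup * nr_running;
	unsigned long long gran = 0;

	if (this_run < expected)
		gran = expected - this_run;
	if (gran > limit)
		gran = limit;

	printf("gran = %llu ns\n", gran);	/* 500000 */
	return 0;
}
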
@@ -1337,7 +1445,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
        if (vdiff <= 0)
                return -1;
 
-       gran = wakeup_gran(curr);
+       gran = wakeup_gran(curr, se);
        if (vdiff > gran)
                return 1;
 
@@ -1346,14 +1454,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->last = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->last = se;
+       }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->next = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->next = se;
+       }
 }
 
 /*
@@ -1389,7 +1501,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
         */
        if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
                set_last_buddy(se);
-       set_next_buddy(pse);
+       if (sched_feat(NEXT_BUDDY))
+               set_next_buddy(pse);
 
        /*
         * We can come here with TIF_NEED_RESCHED already set from new task
@@ -1399,35 +1512,35 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                return;
 
        /*
-        * Batch tasks do not preempt (their preemption is driven by
+        * Batch and idle tasks do not preempt (their preemption is driven by
         * the tick):
         */
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
+       /* Idle tasks are by definition preempted by everybody. */
+       if (unlikely(curr->policy == SCHED_IDLE)) {
+               resched_task(curr);
+               return;
+       }
+
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
-       if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-                       (se->avg_overlap < sysctl_sched_migration_cost &&
-                        pse->avg_overlap < sysctl_sched_migration_cost))) {
+       if ((sched_feat(WAKEUP_SYNC) && sync) ||
+           (sched_feat(WAKEUP_OVERLAP) &&
+            (se->avg_overlap < sysctl_sched_migration_cost &&
+             pse->avg_overlap < sysctl_sched_migration_cost))) {
                resched_task(curr);
                return;
        }
 
        find_matching_se(&se, &pse);
 
-       while (se) {
-               BUG_ON(!pse);
-
-               if (wakeup_preempt_entity(se, pse) == 1) {
-                       resched_task(curr);
-                       break;
-               }
+       BUG_ON(!pse);
 
-               se = parent_entity(se);
-               pse = parent_entity(pse);
-       }
+       if (wakeup_preempt_entity(se, pse) == 1)
+               resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1441,6 +1554,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
        do {
                se = pick_next_entity(cfs_rq);
+               /*
+                * If se was a buddy, clear it so that it will have to earn
+                * the favour again.
+                */
+               __clear_buddies(cfs_rq, se);
                set_next_entity(cfs_rq, se);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);
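
A toy model (hypothetical; plain integers stand in for tasks) of the buddy bookkeeping this hunk touches: once a buddy is actually picked, __clear_buddies() consumes the favour, so it has to be earned again before that task can be preferred over the leftmost entity a second time.

#include <stdio.h>

struct cfs_rq_model { int next; int last; };	/* -1 means "no buddy" */

static int pick_next(struct cfs_rq_model *rq, int leftmost)
{
	int pick = leftmost;

	if (rq->next >= 0)
		pick = rq->next;	/* buddy favour */

	/* __clear_buddies(): using the favour consumes it. */
	if (rq->next == pick)
		rq->next = -1;
	if (rq->last == pick)
		rq->last = -1;

	return pick;
}

int main(void)
{
	struct cfs_rq_model rq = { .next = 7, .last = -1 };

	printf("first pick:  task %d\n", pick_next(&rq, 3));	/* 7, via buddy */
	printf("second pick: task %d\n", pick_next(&rq, 3));	/* 3, favour gone */
	return 0;
}
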
@@ -1623,8 +1741,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
        }
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
-
 /*
  * Share the fairness runtime between parent and child, thus the
  * total amount of pressure for CPU stays equal - new tasks
@@ -1641,11 +1757,13 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
        sched_info_queued(p);
 
        update_curr(cfs_rq);
+       if (curr)
+               se->vruntime = curr->vruntime;
        place_entity(cfs_rq, se, 1);
 
        /* 'curr' will be NULL if the child belongs to a different group */
        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-                       curr && curr->vruntime < se->vruntime) {
+                       curr && entity_before(curr, se)) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.