diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0d197be..18fd171 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -409,64 +409,6 @@ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 /*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- *   -20         |
- *               |
- *     0 --------+-------
- *             .'
- *    19     .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
-       struct load_weight lw = {
-               .weight = NICE_0_LOAD,
-               .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
-       };
-
-       for_each_sched_entity(se) {
-               struct load_weight *se_lw = &se->load;
-               unsigned long rw = cfs_rq_of(se)->load.weight;
-
-#ifdef CONFIG_FAIR_SCHED_GROUP
-               struct cfs_rq *cfs_rq = se->my_q;
-               struct task_group *tg = NULL
-
-               if (cfs_rq)
-                       tg = cfs_rq->tg;
-
-               if (tg && tg->shares < NICE_0_LOAD) {
-                       /*
-                        * scale shares to what it would have been had
-                        * tg->weight been NICE_0_LOAD:
-                        *
-                        *   weight = 1024 * shares / tg->weight
-                        */
-                       lw.weight *= se->load.weight;
-                       lw.weight /= tg->shares;
-
-                       lw.inv_weight = 0;
-
-                       se_lw = &lw;
-                       rw += lw.weight - se->load.weight;
-               } else
-#endif
-
-               if (se->load.weight < NICE_0_LOAD) {
-                       se_lw = &lw;
-                       rw += NICE_0_LOAD - se->load.weight;
-               }
-
-               delta = calc_delta_mine(delta, rw, se_lw);
-       }
-
-       return delta;
-}
-
-/*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
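
wakeup_gran(), further down in this patch, is switched from the calc_delta_asym() helper removed above to a plain calc_delta_mine() scaling against NICE_0_LOAD. As a standalone illustration of that fixed-point scaling (a user-space sketch, not kernel code; the SKETCH_* constants mirror NICE_0_LOAD and WMULT_SHIFT, and the sample weights are illustrative rather than taken from the prio_to_weight[] table):

/*
 * Standalone sketch of the scaling calc_delta_mine() performs:
 * delta * weight / lw_weight, computed with a precomputed 32-bit
 * reciprocal instead of a 64-bit division.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_WMULT_SHIFT	32
#define SKETCH_NICE_0_LOAD	1024ULL

static uint64_t sketch_scale(uint64_t delta, uint64_t weight, uint64_t lw_weight)
{
	uint64_t inv = (1ULL << SKETCH_WMULT_SHIFT) / lw_weight; /* ~ lw->inv_weight */

	return (delta * weight * inv) >> SKETCH_WMULT_SHIFT;
}

int main(void)
{
	uint64_t gran = 5000000;	/* 5ms wakeup granularity, in ns */

	/* a heavy entity (weight 3121) sees its granularity shrink ... */
	printf("heavy: %llu ns\n",
	       (unsigned long long)sketch_scale(gran, SKETCH_NICE_0_LOAD, 3121));
	/* ... while a light one (weight 15) sees it inflated */
	printf("light: %llu ns\n",
	       (unsigned long long)sketch_scale(gran, SKETCH_NICE_0_LOAD, 15));
	return 0;
}

Since the new wakeup_gran() plugs in the wakee's weight, a heavy wakee ends up with a small granularity and preempts sooner, while a nice-19 wakee has to build up a much larger runtime gap first.
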
@@ -586,11 +528,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
-       if (entity_is_task(se))
+       if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
+               list_add(&se->group_node, &cfs_rq->tasks);
+       }
        cfs_rq->nr_running++;
        se->on_rq = 1;
-       list_add(&se->group_node, &cfs_rq->tasks);
 }
 
 static void
@@ -599,11 +542,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
-       if (entity_is_task(se))
+       if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
+               list_del_init(&se->group_node);
+       }
        cfs_rq->nr_running--;
        se->on_rq = 0;
-       list_del_init(&se->group_node);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
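
The two account_entity hunks above move the list_add()/list_del_init() pair under the entity_is_task() check, so cfs_rq->tasks now holds task entities only. That invariant is what lets the __load_balance_iterator() hunk further down drop its skip-over-group-entities loop. A minimal user-space sketch of the idea (all names here are made up for illustration):

#include <stdio.h>

struct sk_entity {
	int			is_task;	/* stands in for entity_is_task() */
	struct sk_entity	*next;		/* stands in for se->group_node   */
};

struct sk_cfs_rq {
	struct sk_entity	*tasks;		/* stands in for cfs_rq->tasks    */
};

static void sk_account_enqueue(struct sk_cfs_rq *cfs_rq, struct sk_entity *se)
{
	if (se->is_task) {			/* group entities stay off the list */
		se->next = cfs_rq->tasks;
		cfs_rq->tasks = se;
	}
}

int main(void)
{
	struct sk_cfs_rq rq = { .tasks = NULL };
	struct sk_entity task = { .is_task = 1, .next = NULL };
	struct sk_entity group = { .is_task = 0, .next = NULL };
	struct sk_entity *it;
	int n = 0;

	sk_account_enqueue(&rq, &group);	/* not linked */
	sk_account_enqueue(&rq, &task);		/* linked */

	for (it = rq.tasks; it; it = it->next)	/* plain walk, no filtering needed */
		n++;
	printf("%d entity on the tasks list\n", n);	/* 1: the group never shows up */
	return 0;
}
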
@@ -726,21 +670,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
                __enqueue_entity(cfs_rq, se);
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-       s64 diff = sample - *avg;
-       *avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-       if (!se->last_wakeup)
-               return;
-
-       update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
-       se->last_wakeup = 0;
-}
-
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
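
The helpers removed above maintained se->avg_overlap, a running estimate of how long a task keeps executing after it has woken another task; the field itself is still consulted later in this patch (in wake_affine() and the WAKEUP_OVERLAP check). A standalone sketch of the averaging update_avg() performed, where the estimate moves 1/8 of the way toward each new sample:

#include <stdint.h>
#include <stdio.h>

/* same 1/8-gain update as the removed update_avg(): avg += (sample - avg) >> 3 */
static void sketch_update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg_overlap = 0;
	uint64_t samples[] = { 800000, 800000, 800000, 800000 };	/* four 0.8ms runs */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		sketch_update_avg(&avg_overlap, samples[i]);

	/* converges toward 800000 ns, but only gradually */
	printf("avg_overlap ~ %llu ns\n", (unsigned long long)avg_overlap);
	return 0;
}
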
@@ -751,7 +680,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
-               update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);
@@ -894,7 +822,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-       int requeue = rq->curr == p;
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -915,13 +842,13 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
                 * Don't schedule slices shorter than 10000ns, that just
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
-               if (!requeue)
-                       delta = max(10000LL, delta);
+               if (rq->curr != p)
+                       delta = max_t(s64, 10000LL, delta);
 
-               hrtick_start(rq, delta, requeue);
+               hrtick_start(rq, delta);
        }
 }
-#else
+#else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
@@ -1020,6 +947,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available.  The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1047,7 +976,8 @@ static int wake_idle(int cpu, struct task_struct *p)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
-                       for_each_cpu_mask(i, tmp) {
+                       cpus_and(tmp, tmp, cpu_active_map);
+                       for_each_cpu_mask_nr(i, tmp) {
                                if (idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
@@ -1062,7 +992,7 @@ static int wake_idle(int cpu, struct task_struct *p)
        }
        return cpu;
 }
-#else
+#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
 static inline int wake_idle(int cpu, struct task_struct *p)
 {
        return cpu;
@@ -1074,95 +1004,142 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
+/*
+ * effective_load() calculates the load change as seen from the root_task_group
+ *
+ * Adding load to a group doesn't make a group heavier, but can cause movement
+ * of group shares between cpus. Assuming the shares were perfectly aligned one
+ * can calculate the shift in shares.
+ *
+ * The problem is that perfectly aligning the shares is rather expensive, hence
+ * we try to avoid doing that too often - see update_shares(), which ratelimits
+ * this change.
+ *
+ * We compensate this by not only taking the current delta into account, but
+ * also considering the delta between when the shares were last adjusted and
+ * now.
+ *
+ * We still saw a performance dip; some tracing showed us that when
+ * balancing between cgroup:/ and cgroup:/foo, the number of affine wakeups
+ * increased significantly. Therefore try to bias the error in the direction
+ * of failing the affine wakeup.
+ *
+ */
+static long effective_load(struct task_group *tg, int cpu,
+               long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
-       long wg = wl;
 
-       for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
+       if (!tg->parent)
+               return wl;
+
+       /*
+        * By not taking the decrease of shares on the other cpu into
+        * account our error leans towards reducing the affine wakeups.
+        */
+       if (!wl && sched_feat(ASYM_EFF_LOAD))
+               return wl;
 
+       for_each_sched_entity(se) {
                long S, rw, s, a, b;
+               long more_w;
+
+               /*
+                * Instead of using this increment, also add the difference
+                * between when the shares were last updated and now.
+                */
+               more_w = se->my_q->load.weight - se->my_q->rq_weight;
+               wl += more_w;
+               wg += more_w;
 
                S = se->my_q->tg->shares;
                s = se->my_q->shares;
-               rw = se->my_q->load.weight;
+               rw = se->my_q->rq_weight;
 
                a = S*(rw + wl);
                b = S*rw + s*wg;
 
-               wl = s*(a-b)/D(b);
+               wl = s*(a-b);
+
+               if (likely(b))
+                       wl /= b;
+
+               /*
+                * Assume the group is already running and will
+                * thus already be accounted for in the weight.
+                *
+                * That is, moving shares between CPUs, does not
+                * alter the group weight.
+                */
                wg = 0;
-#undef D
        }
 
        return wl;
 }
 
-static unsigned long task_load_sub(struct task_struct *p)
-{
-       return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-       return effective_load(task_group(p), p->se.load.weight, cpu);
-}
-
 #else
 
-static unsigned long task_load_sub(struct task_struct *p)
+static inline unsigned long effective_load(struct task_group *tg, int cpu,
+               unsigned long wl, unsigned long wg)
 {
-       return -p->se.load.weight;
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-       return p->se.load.weight;
+       return wl;
 }
 
 #endif
 
 static int
-wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
            struct task_struct *p, int prev_cpu, int this_cpu, int sync,
            int idx, unsigned long load, unsigned long this_load,
            unsigned int imbalance)
 {
        struct task_struct *curr = this_rq->curr;
+       struct task_group *tg;
        unsigned long tl = this_load;
        unsigned long tl_per_task;
+       unsigned long weight;
        int balanced;
 
        if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
                return 0;
 
+       if (!sync && sched_feat(SYNC_WAKEUPS) &&
+           curr->se.avg_overlap < sysctl_sched_migration_cost &&
+           p->se.avg_overlap < sysctl_sched_migration_cost)
+               sync = 1;
+
        /*
         * If sync wakeup then subtract the (maximum possible)
         * effect of the currently running task from the load
         * of the current CPU:
         */
-       if (sync)
-               tl += task_load_sub(current);
+       if (sync) {
+               tg = task_group(current);
+               weight = current->se.load.weight;
+
+               tl += effective_load(tg, this_cpu, -weight, -weight);
+               load += effective_load(tg, prev_cpu, 0, -weight);
+       }
 
-       balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
+       tg = task_group(p);
+       weight = p->se.load.weight;
+
+       balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+               imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
        /*
         * If the currently running task will sleep within
         * a reasonable amount of time then attract this newly
         * woken task:
         */
-       if (sync && balanced && curr->sched_class == &fair_sched_class) {
-               if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-                               p->se.avg_overlap < sysctl_sched_migration_cost)
-                       return 1;
-       }
+       if (sync && balanced)
+               return 1;
 
        schedstat_inc(p, se.nr_wakeups_affine_attempts);
        tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-                       balanced) {
+       if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
+                       tl_per_task)) {
                /*
                 * This domain has SD_WAKE_AFFINE and
                 * p is cache cold in this domain, and
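
To make the effective_load() comment above concrete: each level of the loop computes the root-visible load shift as wl' = s*(a - b)/b with a = S*(rw + wl) and b = S*rw + s*wg, where S is the group's total shares, s the share currently placed on this cpu and rw this cpu's queue weight. A standalone sketch with made-up numbers (not kernel code):

#include <stdio.h>

/* one level of the effective_load() loop, in plain C */
static long sketch_level(long S, long s, long rw, long wl, long wg)
{
	long a = S * (rw + wl);
	long b = S * rw + s * wg;

	wl = s * (a - b);
	if (b)
		wl /= b;
	return wl;
}

int main(void)
{
	/*
	 * A group with 1024 total shares, 512 of them currently on this cpu,
	 * a local queue weight of 2048, and a nice-0 task (weight 1024)
	 * being added: the root only sees ~102 extra units of load, because
	 * the group's shares cap its overall influence.
	 */
	printf("root-visible delta: %ld\n", sketch_level(1024, 512, 2048, 1024, 1024));
	return 0;
}

wake_affine() above feeds exactly this into its balanced test: the wakee's weight enters on this_cpu with wl = wg = weight, while prev_cpu only sees the group-wide term (wl = 0, wg = weight), with the right-hand side scaled by imbalance.
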
@@ -1181,16 +1158,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        struct sched_domain *sd, *this_sd = NULL;
        int prev_cpu, this_cpu, new_cpu;
        unsigned long load, this_load;
-       struct rq *rq, *this_rq;
+       struct rq *this_rq;
        unsigned int imbalance;
        int idx;
 
        prev_cpu        = task_cpu(p);
-       rq              = task_rq(p);
        this_cpu        = smp_processor_id();
        this_rq         = cpu_rq(this_cpu);
        new_cpu         = prev_cpu;
 
+       if (prev_cpu == this_cpu)
+               goto out;
        /*
         * 'this_sd' is the first domain that both
         * this_cpu and prev_cpu are present in:
@@ -1218,13 +1196,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        load = source_load(prev_cpu, idx);
        this_load = target_load(this_cpu, idx);
 
-       if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+       if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
                                     load, this_load, imbalance))
                return this_cpu;
 
-       if (prev_cpu == this_cpu)
-               goto out;
-
        /*
         * Start passive balancing when half the imbalance_pct
         * limit is reached.
@@ -1251,62 +1226,20 @@ static unsigned long wakeup_gran(struct sched_entity *se)
         * + nice tasks.
         */
        if (sched_feat(ASYM_GRAN))
-               gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
-       else
-               gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+               gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
 
        return gran;
 }
 
 /*
- * Should 'se' preempt 'curr'.
- *
- *             |s1
- *        |s2
- *   |s3
- *         g
- *      |<--->|c
- *
- *  w(c, s1) = -1
- *  w(c, s2) =  0
- *  w(c, s3) =  1
- *
- */
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
-{
-       s64 gran, vdiff = curr->vruntime - se->vruntime;
-
-       if (vdiff < 0)
-               return -1;
-
-       gran = wakeup_gran(curr);
-       if (vdiff > gran)
-               return 1;
-
-       return 0;
-}
-
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-       int depth = 0;
-
-       for_each_sched_entity(se)
-               depth++;
-
-       return depth;
-}
-
-/*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        struct sched_entity *se = &curr->se, *pse = &p->se;
-       int se_depth, pse_depth;
+       s64 delta_exec;
 
        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
@@ -1315,13 +1248,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                return;
        }
 
-       se->last_wakeup = se->sum_exec_runtime;
        if (unlikely(se == pse))
                return;
 
        cfs_rq_of(pse)->next = pse;
 
        /*
+        * We can come here with TIF_NEED_RESCHED already set from new task
+        * wake up path.
+        */
+       if (test_tsk_need_resched(curr))
+               return;
+
+       /*
         * Batch tasks do not preempt (their preemption is driven by
         * the tick):
         */
@@ -1331,33 +1270,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
-       /*
-        * preemption test can be made between sibling entities who are in the
-        * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-        * both tasks until we find their ancestors who are siblings of common
-        * parent.
-        */
-
-       /* First walk up until both entities are at same depth */
-       se_depth = depth_se(se);
-       pse_depth = depth_se(pse);
-
-       while (se_depth > pse_depth) {
-               se_depth--;
-               se = parent_entity(se);
-       }
-
-       while (pse_depth > se_depth) {
-               pse_depth--;
-               pse = parent_entity(pse);
-       }
-
-       while (!is_same_group(se, pse)) {
-               se = parent_entity(se);
-               pse = parent_entity(pse);
+       if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+                       (se->avg_overlap < sysctl_sched_migration_cost &&
+                        pse->avg_overlap < sysctl_sched_migration_cost))) {
+               resched_task(curr);
+               return;
        }
 
-       if (wakeup_preempt_entity(se, pse) == 1)
+       delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+       if (delta_exec > wakeup_gran(pse))
                resched_task(curr);
 }
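
Taken together, the check_preempt_wakeup() changes replace the vruntime-window test of the removed wakeup_preempt_entity() with two rules: under WAKEUP_OVERLAP, sync wakeups and pairs of tasks with small avg_overlap preempt immediately, and otherwise curr is preempted once it has run longer since being scheduled in than the wakee's weight-scaled granularity. A standalone sketch of that decision (not kernel code; names made up, times in nanoseconds):

#include <stdint.h>
#include <stdio.h>

static int sketch_should_preempt(int sync,
				 uint64_t curr_overlap, uint64_t wakee_overlap,
				 uint64_t migration_cost,
				 uint64_t curr_sum_exec, uint64_t curr_prev_sum_exec,
				 uint64_t wakee_gran)
{
	/* WAKEUP_OVERLAP path: sync wakeups, or two tasks that both go back
	 * to sleep quickly after waking each other, preempt right away */
	if (sync || (curr_overlap < migration_cost &&
		     wakee_overlap < migration_cost))
		return 1;

	/* otherwise curr must first use up the wakee's granularity */
	return (int64_t)(curr_sum_exec - curr_prev_sum_exec) >
	       (int64_t)wakee_gran;
}

int main(void)
{
	uint64_t cost = 500000;		/* plays the role of sched_migration_cost */

	/* pipe-like pair, both overlaps tiny: preempt immediately */
	printf("%d\n", sketch_should_preempt(0, 100000, 80000, cost,
					     0, 0, 5000000));
	/* cpu hog that has run 2ms against a wakee with a 5ms granularity: no */
	printf("%d\n", sketch_should_preempt(0, 2000000, 2000000, cost,
					     102000000, 100000000, 5000000));
	return 0;
}
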
 
@@ -1413,18 +1334,13 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
        struct task_struct *p = NULL;
        struct sched_entity *se;
 
-       while (next != &cfs_rq->tasks) {
-               se = list_entry(next, struct sched_entity, group_node);
-               next = next->next;
+       if (next == &cfs_rq->tasks)
+               return NULL;
 
-               /* Skip over entities that are not tasks */
-               if (entity_is_task(se)) {
-                       p = task_of(se);
-                       break;
-               }
-       }
+       se = list_entry(next, struct sched_entity, group_node);
+       p = task_of(se);
+       cfs_rq->balance_iterator = next->next;
 
-       cfs_rq->balance_iterator = next;
        return p;
 }
 
@@ -1473,11 +1389,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        rcu_read_lock();
        update_h_load(busiest_cpu);
 
-       list_for_each_entry(tg, &task_groups, list) {
+       list_for_each_entry_rcu(tg, &task_groups, list) {
                struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
-               long rem_load, moved_load;
+               u64 rem_load, moved_load;
 
                /*
                 * empty group
@@ -1485,8 +1401,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                if (!busiest_cfs_rq->task_weight)
                        continue;
 
-               rem_load = rem_load_move * busiest_weight;
-               rem_load /= busiest_h_load + 1;
+               rem_load = (u64)rem_load_move * busiest_weight;
+               rem_load = div_u64(rem_load, busiest_h_load + 1);
 
                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
                                rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1496,7 +1412,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                        continue;
 
                moved_load *= busiest_h_load;
-               moved_load /= busiest_weight + 1;
+               moved_load = div_u64(moved_load, busiest_weight + 1);
 
                rem_load_move -= moved_load;
                if (rem_load_move < 0)
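
The two scalings above convert between root-visible (hierarchical) load and the busiest group's local queue weight: rem_load translates the remaining root-level target into local units, and moved_load translates the amount actually moved back again. Doing the multiply in u64 keeps the weight*load product from overflowing a 32-bit long, and the division then has to go through div_u64(), since a plain 64-bit '/' in the kernel would need libgcc's __udivdi3. A standalone sketch of the two conversions with made-up numbers (ordinary C division stands in for div_u64()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rem_load_move  = 88761;	/* root-level load still to move    */
	uint64_t busiest_weight = 887610;	/* group's local queue weight       */
	uint64_t busiest_h_load = 1024;		/* group's load as seen at the root */

	/* root-level target expressed in the group's local units;
	 * 88761 * 887610 already overflows a 32-bit long */
	uint64_t rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1);

	/* pretend the balancer moved half of that locally */
	uint64_t moved_load = rem_load / 2;

	/* translate back to root-level load before subtracting from rem_load_move */
	moved_load = moved_load * busiest_h_load / (busiest_weight + 1);

	printf("local target %llu, root-level moved %llu\n",
	       (unsigned long long)rem_load, (unsigned long long)moved_load);
	return 0;
}
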
@@ -1542,7 +1458,7 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
        return 0;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * scheduler tick hitting a task of our scheduling class:
@@ -1586,10 +1502,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
+               resched_task(rq->curr);
        }
 
        enqueue_task_fair(rq, p, 0);
-       resched_task(rq->curr);
 }
 
 /*
@@ -1608,7 +1524,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p,
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
-               check_preempt_curr(rq, p);
+               check_preempt_curr(rq, p, 0);
 }
 
 /*
@@ -1625,7 +1541,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p,
        if (running)
                resched_task(rq->curr);
        else
-               check_preempt_curr(rq, p);
+               check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.