xfs: remove nr_to_write writeback windup.
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 49ad993..eed35ed 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -757,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -767,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * Update the normalized vruntime before updating min_vruntime
         * through calling update_curr().
         */
-       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += cfs_rq->min_vruntime;
 
        /*
@@ -803,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
        /*
         * Update run-time statistics of the 'current'.
@@ -811,7 +808,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
        update_curr(cfs_rq);
 
        update_stats_dequeue(cfs_rq, se);
-       if (sleep) {
+       if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);
@@ -836,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
         * update can refer to the ->curr item and we need to reflect this
         * movement in our normalized position.
         */
-       if (!sleep)
+       if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;
 }
 
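
The flag changes in the hunks above feed the vruntime normalization: an entity dequeued for migration keeps its vruntime relative to the old queue's min_vruntime (the "-=" in dequeue_entity) and is re-based on the new queue's min_vruntime when it is enqueued there (the "+=" in enqueue_entity). A small userspace sketch of just that arithmetic, with invented values (not kernel code):

#include <stdio.h>

/*
 * Toy model of the normalization, with made-up numbers: while a task is
 * off a runqueue its vruntime is stored relative to the source queue's
 * min_vruntime, then re-based on the destination queue's min_vruntime
 * at enqueue time.
 */
int main(void)
{
        unsigned long long src_min = 1000, dst_min = 5000;
        unsigned long long vruntime = 1200;     /* absolute, on the source queue */

        vruntime -= src_min;                    /* dequeue: normalize */
        printf("relative vruntime after dequeue: %llu\n", vruntime);

        vruntime += dst_min;                    /* enqueue: re-base on the new queue */
        printf("absolute vruntime after enqueue: %llu\n", vruntime);
        return 0;
}
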
@@ -1045,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
-       int flags = 0;
-
-       if (wakeup)
-               flags |= ENQUEUE_WAKEUP;
-       if (p->state == TASK_WAKING)
-               flags |= ENQUEUE_MIGRATE;
 
        for_each_sched_entity(se) {
                if (se->on_rq)
@@ -1072,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               dequeue_entity(cfs_rq, se, sleep);
+               dequeue_entity(cfs_rq, se, flags);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
-               sleep = 1;
+               flags |= DEQUEUE_SLEEP;
        }
 
        hrtick_update(rq);
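
With group scheduling, the loop above walks up the entity hierarchy: once a child cfs_rq drains, its parent group entity is dequeued too, and from that point on the dequeue is treated as a sleep. A rough userspace model of that walk (all names and numbers invented; the flag bit is a stand-in for the scheduler's DEQUEUE_SLEEP):

#include <stdio.h>

#define DEQUEUE_SLEEP 1         /* stand-in for the scheduler's flag bit */

/* Hypothetical stand-in for a group runqueue; only the load remaining
 * after this dequeue matters for the walk below. */
struct toy_rq {
        unsigned long load_left;
};

static void toy_dequeue(int level, struct toy_rq *rq, int flags)
{
        printf("level %d: dequeue, sleep=%d, load left=%lu\n",
               level, !!(flags & DEQUEUE_SLEEP), rq->load_left);
}

int main(void)
{
        /* leaf cfs_rq first, then its parent groups (invented numbers) */
        struct toy_rq hierarchy[] = { { 0 }, { 0 }, { 512 } };
        int flags = 0;          /* e.g. a migration dequeue, not a sleep */

        for (int i = 0; i < 3; i++) {
                toy_dequeue(i, &hierarchy[i], flags);
                if (hierarchy[i].load_left)     /* parent still has entities: stop */
                        break;
                flags |= DEQUEUE_SLEEP;         /* empty group: parents see a sleep */
        }
        return 0;
}
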
@@ -1234,7 +1225,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
        unsigned long this_load, load;
        int idx, this_cpu, prev_cpu;
        unsigned long tl_per_task;
-       unsigned int imbalance;
        struct task_group *tg;
        unsigned long weight;
        int balanced;
@@ -1261,8 +1251,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
        tg = task_group(p);
        weight = p->se.load.weight;
 
-       imbalance = 100 + (sd->imbalance_pct - 100) / 2;
-
        /*
         * In low-load situations, where prev_cpu is idle and this_cpu is idle
         * due to the sync cause above having dropped this_load to 0, we'll
@@ -1272,9 +1260,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       balanced = !this_load ||
-               100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
-               imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+       if (this_load) {
+               unsigned long this_eff_load, prev_eff_load;
+
+               this_eff_load = 100;
+               this_eff_load *= power_of(prev_cpu);
+               this_eff_load *= this_load +
+                       effective_load(tg, this_cpu, weight, weight);
+
+               prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+               prev_eff_load *= power_of(this_cpu);
+               prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+
+               balanced = this_eff_load <= prev_eff_load;
+       } else
+               balanced = true;
 
        /*
         * If the currently running task will sleep within
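
The rewrite above replaces the percentage check with a cross-multiplied one: each side's load is scaled by the other cpu's capacity, and only the previous cpu's side gets the imbalance_pct slack, so no division is needed. A standalone worked example with invented numbers (power_of() and effective_load() replaced by plain constants folded into the loads):

#include <stdio.h>

/*
 * Only the shape of the cross-scaled, division-free comparison matches
 * the kernel code; all figures are arbitrary.
 */
int main(void)
{
        unsigned long this_load = 1500, prev_load = 1800;
        unsigned long this_power = 1024, prev_power = 1024;     /* power_of(this/prev cpu) */
        unsigned int imbalance_pct = 125;                       /* sd->imbalance_pct */
        unsigned long long this_eff_load, prev_eff_load;

        this_eff_load = 100;
        this_eff_load *= prev_power;    /* scaled by the *other* cpu's capacity */
        this_eff_load *= this_load;

        prev_eff_load = 100 + (imbalance_pct - 100) / 2;        /* allow some slack */
        prev_eff_load *= this_power;
        prev_eff_load *= prev_load;

        printf("balanced = %d\n", this_eff_load <= prev_eff_load);
        return 0;
}

With equal capacities and imbalance_pct = 125 the slack works out to 12%, so the wakeup cpu may carry somewhat more load than prev_cpu and still count as balanced; an asymmetric power_of() shifts that threshold accordingly.
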
@@ -1384,29 +1384,48 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int
-select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_sibling(struct task_struct *p, int target)
 {
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
+       struct sched_domain *sd;
        int i;
 
        /*
-        * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
-        * test in select_task_rq_fair) and the prev_cpu is idle then that's
-        * always a better target than the current cpu.
+        * If the task is going to be woken up on this cpu and if it is
+        * already idle, then it is the right target.
+        */
+       if (target == cpu && idle_cpu(cpu))
+               return cpu;
+
+       /*
+        * If the task is going to be woken up on the cpu where it previously
+        * ran and if it is currently idle, then it is the right target.
         */
-       if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
+       if (target == prev_cpu && idle_cpu(prev_cpu))
                return prev_cpu;
 
        /*
-        * Otherwise, iterate the domain and find an eligible idle cpu.
+        * Otherwise, iterate the domains and find an eligible idle cpu.
         */
-       for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
-               if (!cpu_rq(i)->cfs.nr_running) {
-                       target = i;
+       for_each_domain(target, sd) {
+               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
+
+               for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+                       if (idle_cpu(i)) {
+                               target = i;
+                               break;
+                       }
                }
+
+               /*
+                * Let's stop looking for an idle sibling once we reach
+                * the domain that spans the current cpu and prev_cpu.
+                */
+               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+                       break;
        }
 
        return target;
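
The reworked helper starts at the wakeup target, checks the two obvious fast paths, then walks outward through the domains that share package resources, taking the first idle cpu and giving up once the domain already spans both cpu and prev_cpu. A compressed userspace model of that outer walk (bitmasks instead of sched_domains; topology, idle mask and cpu numbers are all invented):

#include <stdio.h>

/*
 * Toy domains as bitmasks of cpus, innermost first; share_pkg stands in
 * for SD_SHARE_PKG_RESOURCES.
 */
struct toy_domain {
        unsigned int span;      /* bit i set => cpu i is in this domain */
        int share_pkg;
};

int main(void)
{
        struct toy_domain domains[] = {
                { 0x03, 1 },    /* cpu0-1: SMT siblings            */
                { 0x0f, 1 },    /* cpu0-3: shared last-level cache */
                { 0xff, 0 },    /* cpu0-7: whole node              */
        };
        unsigned int idle_mask = 0x04;  /* only cpu2 is idle right now */
        int cpu = 0, prev_cpu = 5, target = cpu;

        for (int d = 0; d < 3; d++) {
                if (!domains[d].share_pkg)
                        break;          /* only search cache-sharing levels */

                for (int i = 0; i < 8; i++) {
                        if ((domains[d].span >> i & 1) && (idle_mask >> i & 1)) {
                                target = i;
                                break;
                        }
                }

                /* stop once the domain spans both cpu and prev_cpu */
                if ((domains[d].span >> cpu & 1) && (domains[d].span >> prev_cpu & 1))
                        break;
        }

        printf("wake the task on cpu %d\n", target);
        return 0;
}

In this made-up topology the search settles on cpu2, the idle cache sibling of the wakeup cpu, rather than the busy prev_cpu.
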
@@ -1423,13 +1442,14 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
 {
        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        int new_cpu = cpu;
-       int want_affine = 0, cpu_idle = !current->pid;
+       int want_affine = 0;
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
@@ -1468,36 +1488,13 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
                }
 
                /*
-                * While iterating the domains looking for a spanning
-                * WAKE_AFFINE domain, adjust the affine target to any idle cpu
-                * in cache sharing domains along the way.
+                * If both cpu and prev_cpu are part of this domain,
+                * cpu is a valid SD_WAKE_AFFINE target.
                 */
-               if (want_affine) {
-                       int target = -1;
-
-                       /*
-                        * If both cpu and prev_cpu are part of this domain,
-                        * cpu is a valid SD_WAKE_AFFINE target.
-                        */
-                       if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
-                               target = cpu;
-
-                       /*
-                        * If there's an idle sibling in this domain, make that
-                        * the wake_affine target instead of the current cpu.
-                        */
-                       if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
-                               target = select_idle_sibling(p, tmp, target);
-
-                       if (target >= 0) {
-                               if (tmp->flags & SD_WAKE_AFFINE) {
-                                       affine_sd = tmp;
-                                       want_affine = 0;
-                                       if (target != cpu)
-                                               cpu_idle = 1;
-                               }
-                               cpu = target;
-                       }
+               if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+                   cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+                       affine_sd = tmp;
+                       want_affine = 0;
                }
 
                if (!want_sd && !want_affine)
@@ -1516,19 +1513,22 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
                 * Pick the largest domain to update shares over
                 */
                tmp = sd;
-               if (affine_sd && (!tmp ||
-                                 cpumask_weight(sched_domain_span(affine_sd)) >
-                                 cpumask_weight(sched_domain_span(sd))))
+               if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
                        tmp = affine_sd;
 
-               if (tmp)
+               if (tmp) {
+                       raw_spin_unlock(&rq->lock);
                        update_shares(tmp);
+                       raw_spin_lock(&rq->lock);
+               }
        }
 #endif
 
        if (affine_sd) {
-               if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
-                       return cpu;
+               if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+                       return select_idle_sibling(p, cpu);
+               else
+                       return select_idle_sibling(p, prev_cpu);
        }
 
        while (sd) {
@@ -1559,10 +1559,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 
                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
-               weight = cpumask_weight(sched_domain_span(sd));
+               weight = sd->span_weight;
                sd = NULL;
                for_each_domain(cpu, tmp) {
-                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
+                       if (weight <= tmp->span_weight)
                                break;
                        if (tmp->flags & sd_flag)
                                sd = tmp;
@@ -2248,7 +2248,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long weight = sd->span_weight;
        unsigned long smt_gain = sd->smt_gain;
 
        smt_gain /= weight;
@@ -2281,7 +2281,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long weight = sd->span_weight;
        unsigned long power = SCHED_LOAD_SCALE;
        struct sched_group *sdg = sd->groups;
 
@@ -2307,6 +2307,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
        if (!power)
                power = 1;
 
+       cpu_rq(cpu)->cpu_power = power;
        sdg->cpu_power = power;
 }
 
@@ -2807,6 +2808,8 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
+static int active_load_balance_cpu_stop(void *data);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2896,8 +2899,9 @@ redo:
                if (need_active_balance(sd, sd_idle, idle)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);
 
-                       /* don't kick the migration_thread, if the curr
-                        * task on busiest cpu can't be moved to this_cpu
+                       /* don't kick the active_load_balance_cpu_stop
+                        * if the curr task on the busiest cpu can't be
+                        * moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                              &busiest->curr->cpus_allowed)) {
@@ -2907,14 +2911,22 @@ redo:
                                goto out_one_pinned;
                        }
 
+                       /*
+                        * ->active_balance synchronizes accesses to
+                        * ->active_balance_work.  Once set, it's cleared
+                        * only after active load balance is finished.
+                        */
                        if (!busiest->active_balance) {
                                busiest->active_balance = 1;
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
                        raw_spin_unlock_irqrestore(&busiest->lock, flags);
+
                        if (active_balance)
-                               wake_up_process(busiest->migration_thread);
+                               stop_one_cpu_nowait(cpu_of(busiest),
+                                       active_load_balance_cpu_stop, busiest,
+                                       &busiest->active_balance_work);
 
                        /*
                         * We've kicked active balancing, reset the failure
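
The migration-thread wakeup is replaced here by queueing a one-shot cpu-stop callback, with ->active_balance acting as a "request already pending" flag that the callback clears when it finishes. A minimal pthread sketch of that flag-plus-one-shot-work pattern (invented names, built with -pthread; this is not the cpu-stop API):

#include <pthread.h>
#include <stdio.h>

/*
 * The flag is set under the lock before queueing a single piece of
 * work, and the worker clears it when done, so at most one request is
 * ever in flight.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active_balance;

static void *balance_worker(void *arg)
{
        pthread_mutex_lock(&lock);
        printf("pushing running tasks off the busy cpu...\n");
        active_balance = 0;             /* allow the next request */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;
        int queued = 0;

        pthread_mutex_lock(&lock);
        if (!active_balance) {          /* mirrors the hunk above */
                active_balance = 1;
                queued = 1;
        }
        pthread_mutex_unlock(&lock);

        if (queued) {                   /* like stop_one_cpu_nowait() */
                pthread_create(&worker, NULL, balance_worker, NULL);
                pthread_join(worker, NULL);
        }
        return 0;
}

The kernel callback additionally re-checks, under the runqueue lock, that it is running on the cpu the work was queued for and that active balancing is still requested before doing anything, as the final hunk below shows.
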
@@ -3021,24 +3033,29 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 }
 
 /*
- * active_load_balance is run by migration threads. It pushes running tasks
- * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
- * running on each physical CPU where possible, and avoids physical /
- * logical imbalances.
- *
- * Called with busiest_rq locked.
+ * active_load_balance_cpu_stop is run by cpu stopper. It pushes
+ * running tasks off the busiest CPU onto idle CPUs. It requires at
+ * least 1 task to be running on each physical CPU where possible, and
+ * avoids physical / logical imbalances.
  */
-static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
+static int active_load_balance_cpu_stop(void *data)
 {
+       struct rq *busiest_rq = data;
+       int busiest_cpu = cpu_of(busiest_rq);
        int target_cpu = busiest_rq->push_cpu;
+       struct rq *target_rq = cpu_rq(target_cpu);
        struct sched_domain *sd;
-       struct rq *target_rq;
+
+       raw_spin_lock_irq(&busiest_rq->lock);
+
+       /* make sure the requested cpu hasn't gone down in the meantime */
+       if (unlikely(busiest_cpu != smp_processor_id() ||
+                    !busiest_rq->active_balance))
+               goto out_unlock;
 
        /* Is there any task to move? */
        if (busiest_rq->nr_running <= 1)
-               return;
-
-       target_rq = cpu_rq(target_cpu);
+               goto out_unlock;
 
        /*
         * This condition is "impossible", if it occurs
@@ -3067,6 +3084,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
                        schedstat_inc(sd, alb_failed);
        }
        double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+       busiest_rq->active_balance = 0;
+       raw_spin_unlock_irq(&busiest_rq->lock);
+       return 0;
 }
 
 #ifdef CONFIG_NO_HZ