Merge branch 'cpu_stop' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc...
diff --git a/kernel/sched.c b/kernel/sched.c
index f1d577a..39aa9c7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2155,7 +2155,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
-               trace_sched_wait_task(rq, p);
+               trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
                ncsw = 0;
@@ -2426,7 +2426,7 @@ out_activate:
        success = 1;
 
 out_running:
-       trace_sched_wakeup(rq, p, success);
+       trace_sched_wakeup(p, success);
        check_preempt_curr(rq, p, wake_flags);
 
        p->state = TASK_RUNNING;
@@ -2600,7 +2600,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
        rq = task_rq_lock(p, &flags);
        activate_task(rq, p, 0);
-       trace_sched_wakeup_new(rq, p, 1);
+       trace_sched_wakeup_new(p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
@@ -2820,7 +2820,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
-       trace_sched_switch(rq, prev, next);
+       trace_sched_switch(prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
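
The four hunks above drop the runqueue argument from the scheduler tracepoint calls: the call sites now pass only the tasks involved (plus the success flag for sched_wakeup). The tracepoint definitions live in include/trace/events/sched.h, outside this file; the new call sites assume prototypes roughly like the minimal sketch below, which is illustrative only (the in-tree TRACE_EVENT() for sched_switch records more fields, e.g. comm, prio and prev_state):

TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev, struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(pid_t, prev_pid)
		__field(pid_t, next_pid)
	),

	TP_fast_assign(
		/* record just enough to identify the context switch */
		__entry->prev_pid = prev->pid;
		__entry->next_pid = next->pid;
	),

	TP_printk("prev_pid=%d ==> next_pid=%d",
		  __entry->prev_pid, __entry->next_pid)
);
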
@@ -8932,6 +8932,16 @@ struct cgroup_subsys cpuacct_subsys = {
 
 void synchronize_sched_expedited(void)
 {
+       barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
        /*
         * There must be a full memory barrier on each affected CPU
         * between the time that try_stop_cpus() is called and the
@@ -8943,24 +8953,7 @@ void synchronize_sched_expedited(void)
         * necessary.  Do smp_mb() anyway for documentation and
         * robustness against future implementation changes.
         */
-       smp_mb();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-       static DEFINE_SPINLOCK(done_mask_lock);
-       struct cpumask *done_mask = data;
-
-       if (done_mask) {
-               spin_lock(&done_mask_lock);
-               cpumask_set_cpu(smp_processor_id(), done_mask);
-               spin_unlock(&done_mask_lock);
-       }
+       smp_mb(); /* See above comment block. */
        return 0;
 }
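
synchronize_sched_expedited_cpu_stop() above matches cpu_stop_fn_t, the callback type of the cpu_stop machinery this branch converts to. For reference, the interface relied on here is declared along the following lines in include/linux/stop_machine.h (reproduced as a sketch, not part of this diff); the -EAGAIN return from try_stop_cpus() is what the retry loop in synchronize_sched_expedited() below handles:

typedef int (*cpu_stop_fn_t)(void *arg);

/* run @fn(@arg) on every CPU in @cpumask from its stopper task and wait */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);

/* as above, but return -EAGAIN instead of waiting if the stoppers are busy */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
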
 
@@ -8976,55 +8969,30 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
-       cpumask_var_t done_mask_var;
-       struct cpumask *done_mask = NULL;
        int snap, trycount = 0;
 
-       /*
-        * done_mask is used to check that all cpus actually have
-        * finished running the stopper, which is guaranteed by
-        * stop_cpus() if it's called with cpu hotplug blocked.  Keep
-        * the paranoia for now but it's best effort if cpumask is off
-        * stack.
-        */
-       if (zalloc_cpumask_var(&done_mask_var, GFP_ATOMIC))
-               done_mask = done_mask_var;
-
        smp_mb();  /* ensure prior mod happens before capturing snap. */
        snap = atomic_read(&synchronize_sched_expedited_count) + 1;
        get_online_cpus();
        while (try_stop_cpus(cpu_online_mask,
                             synchronize_sched_expedited_cpu_stop,
-                            done_mask) == -EAGAIN) {
+                            NULL) == -EAGAIN) {
                put_online_cpus();
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_sched();
-                       goto free_out;
+                       return;
                }
                if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
                        smp_mb(); /* ensure test happens before caller kfree */
-                       goto free_out;
+                       return;
                }
                get_online_cpus();
        }
        atomic_inc(&synchronize_sched_expedited_count);
-       if (done_mask)
-               cpumask_xor(done_mask, done_mask, cpu_online_mask);
+       smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
        put_online_cpus();
-
-       /* paranoia - this can't happen */
-       if (done_mask && cpumask_weight(done_mask)) {
-               char buf[80];
-
-               cpulist_scnprintf(buf, sizeof(buf), done_mask);
-               WARN_ONCE(1, "synchronize_sched_expedited: cpu online and done masks disagree on %d cpus: %s\n",
-                         cpumask_weight(done_mask), buf);
-               synchronize_sched();
-       }
-free_out:
-       free_cpumask_var(done_mask_var);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
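
The rewritten synchronize_sched_expedited() still waits for a full RCU-sched grace period, expedited by forcing every online CPU through its stopper task; the snapshot of synchronize_sched_expedited_count lets a caller that keeps losing the try_stop_cpus() race detect that another caller completed a grace period in the meantime and return early. A hedged usage sketch follows; the foo/global_foo/reader/updater names are illustrative only, updaters are assumed to be serialized by the caller, and the code assumes <linux/rcupdate.h> and <linux/slab.h>:

struct foo {
	int val;
};
static struct foo *global_foo;		/* illustrative only */

/* reader side: a preemption-disabled section is an RCU-sched read side */
static int reader(void)
{
	int val;

	rcu_read_lock_sched();
	val = rcu_dereference_sched(global_foo)->val;
	rcu_read_unlock_sched();
	return val;
}

/* updater side: publish the new copy, then wait out old readers, expedited */
static void updater(struct foo *newp)
{
	struct foo *oldp = global_foo;

	rcu_assign_pointer(global_foo, newp);
	synchronize_sched_expedited();	/* all pre-existing readers are done */
	kfree(oldp);
}
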