* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
- trace_sched_wait_task(rq, p);
+ trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->se.on_rq;
ncsw = 0;
success = 1;
out_running:
- trace_sched_wakeup(rq, p, success);
+ trace_sched_wakeup(p, success);
check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
rq = task_rq_lock(p, &flags);
activate_task(rq, p, 0);
- trace_sched_wakeup_new(rq, p, 1);
+ trace_sched_wakeup_new(p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
- trace_sched_switch(rq, prev, next);
+ trace_sched_switch(prev, next);
mm = next->mm;
oldmm = prev->active_mm;
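With the rq argument gone from every call site above, the tracepoint prototypes in include/trace/events/sched.h shrink to match; the rq that used to be passed was always the task's own runqueue, so it carried no information a probe could not recover from the task itself. Below is a minimal sketch of what the wakeup event looks like with the reduced prototype, assuming a plain TRACE_EVENT() definition; the in-tree version uses an event class and records a few more fields.

TRACE_EVENT(sched_wakeup,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(char,	comm,	TASK_COMM_LEN)
		__field(pid_t,	pid)
		__field(int,	success)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	 = p->pid;
		__entry->success = success;
	),

	TP_printk("comm=%s pid=%d success=%d",
		  __entry->comm, __entry->pid, __entry->success)
);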
/*
void synchronize_sched_expedited(void)
{
+ barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
/*
* There must be a full memory barrier on each affected CPU
* between the time that try_stop_cpus() is called and the
* time that it returns.
*
* In the current initial implementation of cpu_stop, the
* above condition is already met when the control reaches
* this point and the following smp_mb() is not strictly
* necessary. Do smp_mb() anyway for documentation and
* robustness against future implementation changes.
*/
- smp_mb();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
- static DEFINE_SPINLOCK(done_mask_lock);
- struct cpumask *done_mask = data;
-
- if (done_mask) {
- spin_lock(&done_mask_lock);
- cpumask_set_cpu(smp_processor_id(), done_mask);
- spin_unlock(&done_mask_lock);
- }
+ smp_mb(); /* See above comment block. */
return 0;
}
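For context, synchronize_sched_expedited_cpu_stop() is run on every online CPU through the generic cpu_stop machinery. A minimal, hypothetical sketch of that usage pattern follows; flush_cpu_stop() and flush_all_cpus() are illustrative names, not part of this patch.

#include <linux/cpu.h>
#include <linux/stop_machine.h>

/* Runs in the per-CPU stopper thread on each targeted CPU. */
static int flush_cpu_stop(void *unused)
{
	smp_mb();	/* order prior accesses against the stop */
	return 0;
}

static void flush_all_cpus(void)
{
	get_online_cpus();
	/* try_stop_cpus() returns -EAGAIN while another stop is in flight. */
	while (try_stop_cpus(cpu_online_mask, flush_cpu_stop, NULL) == -EAGAIN)
		cpu_relax();
	put_online_cpus();
}

Unlike this sketch, the function below drops the hotplug lock between retries and bounds how long it spins before falling back to synchronize_sched().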
*/
void synchronize_sched_expedited(void)
{
- cpumask_var_t done_mask_var;
- struct cpumask *done_mask = NULL;
int snap, trycount = 0;
- /*
- * done_mask is used to check that all cpus actually have
- * finished running the stopper, which is guaranteed by
- * stop_cpus() if it's called with cpu hotplug blocked. Keep
- * the paranoia for now but it's best effort if cpumask is off
- * stack.
- */
- if (zalloc_cpumask_var(&done_mask_var, GFP_ATOMIC))
- done_mask = done_mask_var;
-
smp_mb(); /* ensure prior mod happens before capturing snap. */
snap = atomic_read(&synchronize_sched_expedited_count) + 1;
get_online_cpus();
while (try_stop_cpus(cpu_online_mask,
synchronize_sched_expedited_cpu_stop,
- done_mask) == -EAGAIN) {
+ NULL) == -EAGAIN) {
put_online_cpus();
if (trycount++ < 10)
udelay(trycount * num_online_cpus());
else {
synchronize_sched();
- goto free_out;
+ return;
}
if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
smp_mb(); /* ensure test happens before caller kfree */
- goto free_out;
+ return;
}
get_online_cpus();
}
atomic_inc(&synchronize_sched_expedited_count);
- if (done_mask)
- cpumask_xor(done_mask, done_mask, cpu_online_mask);
+ smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
put_online_cpus();
-
- /* paranoia - this can't happen */
- if (done_mask && cpumask_weight(done_mask)) {
- char buf[80];
-
- cpulist_scnprintf(buf, sizeof(buf), done_mask);
- WARN_ONCE(1, "synchronize_sched_expedited: cpu online and done masks disagree on %d cpus: %s\n",
- cpumask_weight(done_mask), buf);
- synchronize_sched();
- }
-free_out:
- free_cpumask_var(done_mask_var);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
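The retry logic above leans on synchronize_sched_expedited_count as a completion counter: a caller snapshots "current count + 1" before trying to stop the CPUs, and if the counter later moves past that snapshot, some other caller's expedited grace period completed entirely after ours began and therefore covers us as well. A stripped-down sketch of that idiom, with try_do_pass() standing in as a hypothetical helper:

#include <linux/atomic.h>
#include <linux/delay.h>

static atomic_t pass_count = ATOMIC_INIT(0);

static int try_do_pass(void);	/* hypothetical: attempt one expedited pass */

static void wait_for_pass(void)
{
	int snap, trycount = 0;

	smp_mb();	/* order prior updates before taking the snapshot */
	snap = atomic_read(&pass_count) + 1;

	while (!try_do_pass()) {
		if (trycount++ >= 10)
			return;	/* give up; the real code falls back to the slow path */
		/*
		 * Wrap-safe signed comparison: the counter has advanced
		 * past our snapshot, so someone else's pass covers us.
		 */
		if (atomic_read(&pass_count) - snap > 0)
			return;
		udelay(trycount);
	}
	atomic_inc(&pass_count);
}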