/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
-static struct completion finished;
static DEFINE_MUTEX(lock);
+/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
+static DEFINE_MUTEX(setup_lock);
+/* Users of stop_machine. */
+static int refcount;
+static struct workqueue_struct *stop_machine_wq;
+static struct stop_machine_data active, idle;
+static const struct cpumask *active_cpus;
+static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
{
/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
- if (atomic_dec_and_test(&thread_ack)) {
- /* If we're the last one to ack the EXIT, we're finished. */
- if (state == STOPMACHINE_EXIT)
- complete(&finished);
- else
- set_state(state + 1);
- }
+ if (atomic_dec_and_test(&thread_ack))
+ set_state(state + 1);
}
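For context, set_state() above (its body is untouched by this patch and elided here) is the counterpart of ack_state(): it re-arms thread_ack before publishing the new state, which is what lets the last decrement safely advance the machine. A minimal sketch of that pattern, assuming the usual counter-before-state ordering:

	static void set_state(enum stopmachine_state newstate)
	{
		/* Reset ack counter: each online CPU acks this state once. */
		atomic_set(&thread_ack, num_threads);
		smp_wmb();	/* counter must be visible before the state */
		state = newstate;
	}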
-/* This is the actual thread which stops the CPU. It exits by itself rather
- * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
-static int stop_cpu(struct stop_machine_data *smdata)
+/* This is the actual function which stops the CPU. It runs
+ * in the context of a dedicated stopmachine workqueue. */
+static void stop_cpu(struct work_struct *unused)
{
enum stopmachine_state curstate = STOPMACHINE_NONE;
- int uninitialized_var(ret);
+ struct stop_machine_data *smdata = &idle;
+ int cpu = smp_processor_id();
+ int err;
+ if (!active_cpus) {
+ if (cpu == cpumask_first(cpu_online_mask))
+ smdata = &active;
+ } else {
+ if (cpumask_test_cpu(cpu, active_cpus))
+ smdata = &active;
+ }
/* Simple state machine */
do {
/* Chill out and ensure we re-read stopmachine_state. */
hard_irq_disable();
break;
case STOPMACHINE_RUN:
- /* |= allows error detection if functions on
- * multiple CPUs. */
- smdata->fnret |= smdata->fn(smdata->data);
+ /* On multiple CPUs a single error code is
+ * enough to report that something failed. */
+ err = smdata->fn(smdata->data);
+ if (err)
+ smdata->fnret = err;
break;
default:
break;
} while (curstate != STOPMACHINE_EXIT);
local_irq_enable();
- do_exit(0);
}
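The active/idle split above gives the caller control over where fn() runs: with a NULL mask only the first online CPU executes it, otherwise every CPU in the mask does, while the remaining CPUs spin in chill() with interrupts hard-disabled. A hedged usage sketch (probe_hw() is an invented example, not part of this patch):

	/* Run probe_hw() on CPU 1 only; all other online CPUs just spin. */
	static int probe_hw(void *data)
	{
		return 0;	/* propagated back through stop_machine() */
	}

	static int run_probe(void)
	{
		return stop_machine(probe_hw, NULL, cpumask_of(1));
	}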
/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}
-int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine_create(void)
{
- int i, err;
- struct stop_machine_data active, idle;
- struct task_struct **threads;
+ mutex_lock(&setup_lock);
+ if (refcount)
+ goto done;
+ stop_machine_wq = create_rt_workqueue("kstop");
+ if (!stop_machine_wq)
+ goto err_out;
+ stop_machine_work = alloc_percpu(struct work_struct);
+ if (!stop_machine_work)
+ goto err_out;
+done:
+ refcount++;
+ mutex_unlock(&setup_lock);
+ return 0;
- active.fn = fn;
- active.data = data;
- active.fnret = 0;
- idle.fn = chill;
- idle.data = NULL;
+err_out:
+ if (stop_machine_wq)
+ destroy_workqueue(stop_machine_wq);
+ mutex_unlock(&setup_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(stop_machine_create);
- /* If they don't care which cpu fn runs on, just pick one. */
- if (cpu == NR_CPUS)
- cpu = any_online_cpu(cpu_online_map);
+void stop_machine_destroy(void)
+{
+ mutex_lock(&setup_lock);
+ refcount--;
+ if (refcount)
+ goto done;
+ destroy_workqueue(stop_machine_wq);
+ free_percpu(stop_machine_work);
+done:
+ mutex_unlock(&setup_lock);
+}
+EXPORT_SYMBOL_GPL(stop_machine_destroy);
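The create/destroy pair lets a user take a reference in a context where sleeping and failing are still acceptable; while the reference is held, the fast path in stop_machine_create() only bumps refcount, so later stop_machine() calls cannot fail with -ENOMEM. A sketch of the intended pairing, with invented module hooks:

	static int __init example_init(void)
	{
		int ret;

		/* Pre-allocate the kstop workqueue and per-cpu works. */
		ret = stop_machine_create();
		if (ret)
			return ret;
		/* ... stop_machine() can no longer hit -ENOMEM ... */
		return 0;
	}

	static void __exit example_exit(void)
	{
		stop_machine_destroy();	/* drop our reference */
	}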
- /* This could be too big for stack on large machines. */
- threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
- if (!threads)
- return -ENOMEM;
+int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+{
+ struct work_struct *sm_work;
+ int i, ret;
/* Set up initial state. */
mutex_lock(&lock);
- init_completion(&finished);
num_threads = num_online_cpus();
+ active_cpus = cpus;
+ active.fn = fn;
+ active.data = data;
+ active.fnret = 0;
+ idle.fn = chill;
+ idle.data = NULL;
+
set_state(STOPMACHINE_PREPARE);
+ /* Schedule the stop_cpu work on all cpus: hold this CPU so our
+ * own stop_cpu work can't run until all of them are queued. */
+ get_cpu();
for_each_online_cpu(i) {
- struct stop_machine_data *smdata;
- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-
- if (cpu == ALL_CPUS || i == cpu)
- smdata = &active;
- else
- smdata = &idle;
-
- threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
- i);
- if (IS_ERR(threads[i])) {
- err = PTR_ERR(threads[i]);
- threads[i] = NULL;
- goto kill_threads;
- }
-
- /* Place it onto correct cpu. */
- kthread_bind(threads[i], i);
-
- /* Make it highest prio. */
- if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
- BUG();
+ sm_work = per_cpu_ptr(stop_machine_work, i);
+ INIT_WORK(sm_work, stop_cpu);
+ queue_work_on(i, stop_machine_wq, sm_work);
}
-
- /* We've created all the threads. Wake them all: hold this CPU so one
- * doesn't hit this CPU until we're ready. */
- cpu = get_cpu();
- for_each_online_cpu(i)
- wake_up_process(threads[i]);
-
- /* This will release the thread on our CPU. */
+ /* This lets the queued work items, including ours, run. */
put_cpu();
- wait_for_completion(&finished);
+ flush_workqueue(stop_machine_wq);
+ ret = active.fnret;
mutex_unlock(&lock);
-
- kfree(threads);
-
- return active.fnret;
-
-kill_threads:
- for_each_online_cpu(i)
- if (threads[i])
- kthread_stop(threads[i]);
- mutex_unlock(&lock);
-
- kfree(threads);
- return err;
+ return ret;
}
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
int ret;
+ ret = stop_machine_create();
+ if (ret)
+ return ret;
/* No CPUs can come up or down during this. */
get_online_cpus();
- ret = __stop_machine_run(fn, data, cpu);
+ ret = __stop_machine(fn, data, cpus);
put_online_cpus();
-
+ stop_machine_destroy();
return ret;
}
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
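Finally, the common case for callers that don't care which CPU runs their function: pass a NULL mask and let __stop_machine() pick the first online CPU. do_reconfig() below is invented for illustration:

	static int do_reconfig(void *data)
	{
		/* Runs on one CPU; all others spin with IRQs disabled. */
		return 0;
	}

	static int reconfigure(void *cfg)
	{
		return stop_machine(do_reconfig, cfg, NULL);
	}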