X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fstop_machine.c;h=b4e7431e7c78996607ef0495d7cb50bc34621d2c;hb=b8ae30ee26d379db436b0b8c8c3ff1b52f69e5d1;hp=b7350bbfb076293dd6d13d7ba060b3aba1f673a9;hpb=3401a61e16a5b852d4e353c8850c857105a67a9c;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7350bb..b4e7431 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,215 +1,486 @@
-/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
- * GPL v2 and any later version.
+/*
+ * kernel/stop_machine.c
+ *
+ * Copyright (C) 2008, 2005	IBM Corporation.
+ * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
+ * Copyright (C) 2010		SUSE Linux Products GmbH
+ * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2 and any later version.
  */
+#include <linux/completion.h>
 #include <linux/cpu.h>
-#include <linux/err.h>
+#include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/stop_machine.h>
-#include <linux/syscalls.h>
 #include <linux/interrupt.h>
+#include <linux/kallsyms.h>
 
 #include <asm/atomic.h>
-#include <asm/uaccess.h>
 
-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
+/*
+ * Structure to determine completion condition and record errors. May
+ * be shared by works on different cpus.
+ */
+struct cpu_stop_done {
+	atomic_t		nr_todo;	/* nr left to execute */
+	bool			executed;	/* actually executed? */
+	int			ret;		/* collected return value */
+	struct completion	completion;	/* fired if nr_todo reaches 0 */
+};
 
-/* Thread to stop each CPU in user context. */
-enum stopmachine_state {
-	STOPMACHINE_WAIT,
-	STOPMACHINE_PREPARE,
-	STOPMACHINE_DISABLE_IRQ,
-	STOPMACHINE_EXIT,
+/* the actual stopper, one per every possible cpu, enabled on online cpus */
+struct cpu_stopper {
+	spinlock_t		lock;
+	struct list_head	works;		/* list of pending works */
+	struct task_struct	*thread;	/* stopper thread */
+	bool			enabled;	/* is this stopper enabled? */
 };
 
-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
+static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 
-static int stopmachine(void *cpu)
+static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 {
-	int irqs_disabled = 0;
-	int prepared = 0;
+	memset(done, 0, sizeof(*done));
+	atomic_set(&done->nr_todo, nr_todo);
+	init_completion(&done->completion);
+}
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+/* signal completion unless @done is NULL */
+static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
+{
+	if (done) {
+		if (executed)
+			done->executed = true;
+		if (atomic_dec_and_test(&done->nr_todo))
+			complete(&done->completion);
+	}
+}
 
-	/* Ack: we are alive */
-	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-	atomic_inc(&stopmachine_thread_ack);
+/* queue @work to @stopper. if offline, @work is completed immediately */
+static void cpu_stop_queue_work(struct cpu_stopper *stopper,
+				struct cpu_stop_work *work)
+{
+	unsigned long flags;
 
-	/* Simple state machine */
-	while (stopmachine_state != STOPMACHINE_EXIT) {
-		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-		    && !irqs_disabled) {
-			local_irq_disable();
-			hard_irq_disable();
-			irqs_disabled = 1;
-			/* Ack: irqs disabled. */
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		} else if (stopmachine_state == STOPMACHINE_PREPARE
-			   && !prepared) {
-			/* Everyone is in place, hold CPU. */
-			preempt_disable();
-			prepared = 1;
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		}
-		/* Yield in first stage: migration threads need to
-		 * help our sisters onto their CPUs. */
-		if (!prepared && !irqs_disabled)
-			yield();
-		cpu_relax();
+	spin_lock_irqsave(&stopper->lock, flags);
+
+	if (stopper->enabled) {
+		list_add_tail(&work->list, &stopper->works);
+		wake_up_process(stopper->thread);
+	} else
+		cpu_stop_signal_done(work->done, false);
+
+	spin_unlock_irqrestore(&stopper->lock, flags);
+}
+
+/**
+ * stop_one_cpu - stop a cpu
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Execute @fn(@arg) on @cpu. @fn is run in a process context with
+ * the highest priority preempting any task on the cpu and
+ * monopolizing it. This function returns after the execution is
+ * complete.
+ *
+ * This function doesn't guarantee @cpu stays online till @fn
+ * completes. If @cpu goes down in the middle, execution may happen
+ * partially or fully on different cpus. @fn should either be ready
+ * for that or the caller should ensure that @cpu stays online until
+ * this function completes.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
+ * otherwise, the return value of @fn.
+ */
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+
+	cpu_stop_init_done(&done, 1);
+	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+	wait_for_completion(&done.completion);
+	return done.executed ? done.ret : -ENOENT;
+}
+
+/**
+ * stop_one_cpu_nowait - stop a cpu but don't wait for completion
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Similar to stop_one_cpu() but doesn't wait for completion. The
+ * caller is responsible for ensuring @work_buf is currently unused
+ * and will remain untouched until stopper starts executing @fn.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf)
+{
+	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+}
+
+/* static data for stop_cpus */
+static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+
+int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_work *work;
+	struct cpu_stop_done done;
+	unsigned int cpu;
+
+	/* initialize works and done */
+	for_each_cpu(cpu, cpumask) {
+		work = &per_cpu(stop_cpus_work, cpu);
+		work->fn = fn;
+		work->arg = arg;
+		work->done = &done;
 	}
+	cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
-	/* Ack: we are exiting. */
-	smp_mb(); /* Must read state first. */
-	atomic_inc(&stopmachine_thread_ack);
+	/*
+	 * Disable preemption while queueing to avoid getting
+	 * preempted by a stopper which might wait for other stoppers
+	 * to enter @fn which can lead to deadlock.
+	 */
+	preempt_disable();
+	for_each_cpu(cpu, cpumask)
+		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+				    &per_cpu(stop_cpus_work, cpu));
+	preempt_enable();
 
-	if (irqs_disabled)
-		local_irq_enable();
-	if (prepared)
-		preempt_enable();
+	wait_for_completion(&done.completion);
+	return done.executed ? done.ret : -ENOENT;
+}
 
-	return 0;
+/**
+ * stop_cpus - stop multiple cpus
+ * @cpumask: cpus to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
+ * @fn is run in a process context with the highest priority
+ * preempting any task on the cpu and monopolizing it. This function
+ * returns after all executions are complete.
+ *
+ * This function doesn't guarantee the cpus in @cpumask stay online
+ * till @fn completes. If some cpus go down in the middle, execution
+ * on the cpu may happen partially or fully on different cpus. @fn
+ * should either be ready for that or the caller should ensure that
+ * the cpus stay online until this function completes.
+ *
+ * All stop_cpus() calls are serialized making it safe for @fn to wait
+ * for all cpus to start executing it.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -ENOENT if @fn(@arg) was not executed at all because all cpus in
+ * @cpumask were offline; otherwise, 0 if all executions of @fn
+ * returned 0, any non zero return value if any returned non zero.
+ */
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+{
+	int ret;
+
+	/* static works are used, process one request at a time */
+	mutex_lock(&stop_cpus_mutex);
+	ret = __stop_cpus(cpumask, fn, arg);
+	mutex_unlock(&stop_cpus_mutex);
+	return ret;
 }
 
-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
+/**
+ * try_stop_cpus - try to stop multiple cpus
+ * @cpumask: cpus to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Identical to stop_cpus() except that it fails with -EAGAIN if
+ * someone else is already using the facility.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -EAGAIN if someone else is already stopping cpus, -ENOENT if
+ * @fn(@arg) was not executed at all because all cpus in @cpumask were
+ * offline; otherwise, 0 if all executions of @fn returned 0, any non
+ * zero return value if any returned non zero.
+ */
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
-	atomic_set(&stopmachine_thread_ack, 0);
-	smp_wmb();
-	stopmachine_state = state;
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-		cpu_relax();
+	int ret;
+
+	/* static works are used, process one request at a time */
+	if (!mutex_trylock(&stop_cpus_mutex))
+		return -EAGAIN;
+	ret = __stop_cpus(cpumask, fn, arg);
+	mutex_unlock(&stop_cpus_mutex);
+	return ret;
 }
 
-static int stop_machine(void)
+static int cpu_stopper_thread(void *data)
 {
-	int i, ret = 0;
+	struct cpu_stopper *stopper = data;
+	struct cpu_stop_work *work;
+	int ret;
 
-	atomic_set(&stopmachine_thread_ack, 0);
-	stopmachine_num_threads = 0;
-	stopmachine_state = STOPMACHINE_WAIT;
+repeat:
+	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 
-	for_each_online_cpu(i) {
-		if (i == raw_smp_processor_id())
-			continue;
-		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
-		if (ret < 0)
-			break;
-		stopmachine_num_threads++;
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
+		return 0;
 	}
 
-	/* Wait for them all to come to life. */
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-		yield();
-		cpu_relax();
+	work = NULL;
+	spin_lock_irq(&stopper->lock);
+	if (!list_empty(&stopper->works)) {
+		work = list_first_entry(&stopper->works,
+					struct cpu_stop_work, list);
+		list_del_init(&work->list);
 	}
+	spin_unlock_irq(&stopper->lock);
 
-	/* If some failed, kill them all. */
-	if (ret < 0) {
-		stopmachine_set_state(STOPMACHINE_EXIT);
-		return ret;
-	}
+	if (work) {
+		cpu_stop_fn_t fn = work->fn;
+		void *arg = work->arg;
+		struct cpu_stop_done *done = work->done;
+		char ksym_buf[KSYM_NAME_LEN];
 
-	/* Now they are all started, make them hold the CPUs, ready. */
-	preempt_disable();
-	stopmachine_set_state(STOPMACHINE_PREPARE);
+		__set_current_state(TASK_RUNNING);
 
-	/* Make them disable irqs. */
-	local_irq_disable();
-	hard_irq_disable();
-	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
+		/* cpu stop callbacks are not allowed to sleep */
+		preempt_disable();
 
-	return 0;
+		ret = fn(arg);
+		if (ret)
+			done->ret = ret;
+
+		/* restore preemption and check it's still balanced */
+		preempt_enable();
+		WARN_ONCE(preempt_count(),
+			  "cpu_stop: %s(%p) leaked preempt count\n",
+			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+					  ksym_buf), arg);
+
+		cpu_stop_signal_done(done, true);
+	} else
+		schedule();
+
+	goto repeat;
 }
 
-static void restart_machine(void)
+/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
+static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+					   unsigned long action, void *hcpu)
 {
-	stopmachine_set_state(STOPMACHINE_EXIT);
-	local_irq_enable();
-	preempt_enable_no_resched();
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int cpu = (unsigned long)hcpu;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	struct task_struct *p;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		BUG_ON(stopper->thread || stopper->enabled ||
+		       !list_empty(&stopper->works));
+		p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
+				   cpu);
+		if (IS_ERR(p))
+			return NOTIFY_BAD;
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+		get_task_struct(p);
+		stopper->thread = p;
+		break;
+
+	case CPU_ONLINE:
+		kthread_bind(stopper->thread, cpu);
+		/* strictly unnecessary, as first user will wake it */
+		wake_up_process(stopper->thread);
+		/* mark enabled */
+		spin_lock_irq(&stopper->lock);
+		stopper->enabled = true;
+		spin_unlock_irq(&stopper->lock);
+		break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+	{
+		struct cpu_stop_work *work;
+
+		/* kill the stopper */
+		kthread_stop(stopper->thread);
+		/* drain remaining works */
+		spin_lock_irq(&stopper->lock);
+		list_for_each_entry(work, &stopper->works, list)
+			cpu_stop_signal_done(work->done, false);
+		stopper->enabled = false;
+		spin_unlock_irq(&stopper->lock);
+		/* release the stopper */
+		put_task_struct(stopper->thread);
+		stopper->thread = NULL;
+		break;
+	}
+#endif
+	}
+
+	return NOTIFY_OK;
 }
 
-struct stop_machine_data {
-	int (*fn)(void *);
-	void *data;
-	struct completion done;
+/*
+ * Give it a higher priority so that cpu stopper is available to other
+ * cpu notifiers. It currently shares the same priority as sched
+ * migration_notifier.
+ */ +static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = { + .notifier_call = cpu_stop_cpu_callback, + .priority = 10, }; -static int do_stop(void *_smdata) +static int __init cpu_stop_init(void) { - struct stop_machine_data *smdata = _smdata; - int ret; + void *bcpu = (void *)(long)smp_processor_id(); + unsigned int cpu; + int err; + + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - ret = stop_machine(); - if (ret == 0) { - ret = smdata->fn(smdata->data); - restart_machine(); + spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); } - /* We're done: you can kthread_stop us now */ - complete(&smdata->done); + /* start one for the boot cpu */ + err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE, + bcpu); + BUG_ON(err == NOTIFY_BAD); + cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu); + register_cpu_notifier(&cpu_stop_cpu_notifier); - /* Wait for kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { - schedule(); - set_current_state(TASK_INTERRUPTIBLE); - } - __set_current_state(TASK_RUNNING); - return ret; + return 0; } +early_initcall(cpu_stop_init); -struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu) +#ifdef CONFIG_STOP_MACHINE + +/* This controls the threads on each CPU. */ +enum stopmachine_state { + /* Dummy starting state for thread. */ + STOPMACHINE_NONE, + /* Awaiting everyone to be scheduled. */ + STOPMACHINE_PREPARE, + /* Disable interrupts. */ + STOPMACHINE_DISABLE_IRQ, + /* Run the function */ + STOPMACHINE_RUN, + /* Exit */ + STOPMACHINE_EXIT, +}; + +struct stop_machine_data { + int (*fn)(void *); + void *data; + /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ + unsigned int num_threads; + const struct cpumask *active_cpus; + + enum stopmachine_state state; + atomic_t thread_ack; +}; + +static void set_state(struct stop_machine_data *smdata, + enum stopmachine_state newstate) { - static DEFINE_MUTEX(stopmachine_mutex); - struct stop_machine_data smdata; - struct task_struct *p; + /* Reset ack counter. */ + atomic_set(&smdata->thread_ack, smdata->num_threads); + smp_wmb(); + smdata->state = newstate; +} + +/* Last one to ack a state moves to the next state. */ +static void ack_state(struct stop_machine_data *smdata) +{ + if (atomic_dec_and_test(&smdata->thread_ack)) + set_state(smdata, smdata->state + 1); +} + +/* This is the cpu_stop function which stops the CPU. */ +static int stop_machine_cpu_stop(void *data) +{ + struct stop_machine_data *smdata = data; + enum stopmachine_state curstate = STOPMACHINE_NONE; + int cpu = smp_processor_id(), err = 0; + bool is_active; - smdata.fn = fn; - smdata.data = data; - init_completion(&smdata.done); + if (!smdata->active_cpus) + is_active = cpu == cpumask_first(cpu_online_mask); + else + is_active = cpumask_test_cpu(cpu, smdata->active_cpus); - mutex_lock(&stopmachine_mutex); + /* Simple state machine */ + do { + /* Chill out and ensure we re-read stopmachine_state. */ + cpu_relax(); + if (smdata->state != curstate) { + curstate = smdata->state; + switch (curstate) { + case STOPMACHINE_DISABLE_IRQ: + local_irq_disable(); + hard_irq_disable(); + break; + case STOPMACHINE_RUN: + if (is_active) + err = smdata->fn(smdata->data); + break; + default: + break; + } + ack_state(smdata); + } + } while (curstate != STOPMACHINE_EXIT); - /* If they don't care which CPU fn runs on, bind to any online one. 
-	if (cpu == NR_CPUS)
-		cpu = raw_smp_processor_id();
+	local_irq_enable();
+	return err;
+}
 
-	p = kthread_create(do_stop, &smdata, "kstopmachine");
-	if (!IS_ERR(p)) {
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+{
+	struct stop_machine_data smdata = { .fn = fn, .data = data,
+					    .num_threads = num_online_cpus(),
+					    .active_cpus = cpus };
 
-		/* One high-prio thread per cpu. We'll do this one. */
-		sched_setscheduler(p, SCHED_FIFO, &param);
-		kthread_bind(p, cpu);
-		wake_up_process(p);
-		wait_for_completion(&smdata.done);
-	}
-	mutex_unlock(&stopmachine_mutex);
-	return p;
+	/* Set the initial state and stop all online cpus. */
+	set_state(&smdata, STOPMACHINE_PREPARE);
+	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
-	struct task_struct *p;
 	int ret;
 
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
-	p = __stop_machine_run(fn, data, cpu);
-	if (!IS_ERR(p))
-		ret = kthread_stop(p);
-	else
-		ret = PTR_ERR(p);
+	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
+
+#endif /* CONFIG_STOP_MACHINE */
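
Usage sketch (not part of the patch above): the patch replaces the old kstopmachine/CLONE_KERNEL scheme with per-cpu "migration/N" stopper threads and a small work-queueing layer (cpu_stop), then reimplements stop_machine() on top of stop_cpus(). The following is a minimal, illustrative caller of the new interface, assuming a hypothetical example module; the callback and function names example_stopped_fn/example_init are invented for illustration, and only stop_machine(), stop_one_cpu() and standard kernel helpers of this era are used.

/* Hypothetical example module; illustrative only, not part of the patch. */
#include <linux/module.h>
#include <linux/stop_machine.h>
#include <linux/smp.h>

/*
 * When invoked via stop_machine(), this runs while every online CPU is
 * captured in stop_machine_cpu_stop() with interrupts disabled, so it
 * must not sleep or take sleeping locks.
 */
static int example_stopped_fn(void *data)
{
	int *ran_on = data;

	*ran_on = smp_processor_id();
	return 0;
}

static int __init example_init(void)
{
	int ran_on = -1;
	int ret;

	/* NULL cpumask: the callback runs on the first online CPU. */
	ret = stop_machine(example_stopped_fn, &ran_on, NULL);
	pr_info("stop_machine: ran on cpu %d, ret %d\n", ran_on, ret);

	/*
	 * Run the same callback on CPU 0 only, through its stopper
	 * thread, without freezing the rest of the machine.
	 */
	ret = stop_one_cpu(0, example_stopped_fn, &ran_on);
	pr_info("stop_one_cpu: ran on cpu %d, ret %d\n", ran_on, ret);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");

Both calls funnel through the cpu_stop_queue_work()/cpu_stopper_thread() path introduced by the patch; stop_machine() merely wraps stop_cpus() with the STOPMACHINE_* state machine so that all online CPUs spin with IRQs disabled while the active CPU runs the callback.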