/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

static struct {
	struct list_head	queue;
	spinlock_t		lock;
} call_function __cacheline_aligned_in_smp = {
	.queue	= LIST_HEAD_INIT(call_function.queue),
	.lock	= __SPIN_LOCK_UNLOCKED(call_function.lock),
};

enum {
	CSD_FLAG_LOCK	= 0x01,	/* csd is in flight; see csd_lock()/csd_unlock() */
};

struct call_function_data {
	struct call_single_data	csd;
	spinlock_t		lock;
	unsigned int		refs;
	cpumask_var_t		cpumask;
};

struct call_single_queue {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
	.lock	= __SPIN_LOCK_UNLOCKED(cfd_data.lock),
};

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
					    cpu_to_node(cpu)))
			return NOTIFY_BAD;
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call	= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the previous
 * function call. For multi-cpu calls it's even more interesting as we'll have
 * to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment to ->flags
	 * with any subsequent assignments to other fields of the
	 * specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

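/*
 * Illustrative sketch of the csd lifecycle as used by the callers further
 * down in this file ('f' and 'p' are placeholders, not real symbols): the
 * sender takes CSD_FLAG_LOCK, fills in the csd and queues it; the target
 * CPU runs ->func from IPI context and drops the flag via csd_unlock(),
 * which is exactly what csd_lock_wait() spins on.
 *
 *	csd_lock(data);				sender
 *	data->func = f;
 *	data->info = p;
 *	generic_exec_single(cpu, data, wait);
 *		...
 *		data->func(data->info);		target CPU, IPI context
 *		csd_unlock(data);
 */
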
/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, before the IPI is sent;
	 * this follows from the normal cache coherency rules implied by
	 * spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really equipped
	 * to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may delete
	 * 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		spin_lock(&data->lock);
		if (!cpumask_test_cpu(cpu, data->cpumask)) {
			spin_unlock(&data->lock);
			continue;
		}
		cpumask_clear_cpu(cpu, data->cpumask);
		spin_unlock(&data->lock);

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		WARN_ON(data->refs == 0);
		refs = --data->refs;
		if (!refs) {
			spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			spin_unlock(&call_function.lock);
		}
		spin_unlock(&data->lock);

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}

	put_cpu();
}

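/*
 * Illustrative sketch of hypothetical arch glue (the function and ack
 * helper names are placeholders, not taken from this file): the
 * architecture's call-function IPI vector acks the interrupt and hands
 * off to the generic handler above, with interrupts still disabled.
 */
#if 0
void example_arch_call_function_ipi(void)
{
	example_ack_ipi();	/* placeholder for the arch's IPI ack */

	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}
#endif
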
/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()), so save
		 * them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data = &d;

		if (!wait)
			data = &__get_cpu_var(csd_data);

		csd_lock(data);

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data, wait);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

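/*
 * Illustrative usage sketch (the callback and counter below are made-up
 * examples, not part of this file): run a fast, non-blocking callback on
 * CPU 1 and wait for it to finish. The callback executes on the target
 * CPU in IPI context with interrupts disabled.
 */
#if 0
static void example_remote_tick(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int example_single(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* wait == 1, so 'hits' on our stack stays valid for the callee */
	return smp_call_function_single(1, example_remote_tick, &hits, 1);
}
#endif
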
/*
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
 * data structure. Useful for embedding @data inside other structures, for
 * instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(wait && irqs_disabled());

	generic_exec_single(cpu, data, wait);
}

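/*
 * Illustrative sketch (the structure and helpers are made-up examples, not
 * part of this file): embed a call_single_data in a driver object so the
 * submission path needs no allocation, then fire it at another CPU without
 * waiting for completion.
 */
#if 0
struct example_dev {
	struct call_single_data csd;
	int pending;
};

static void example_complete(void *info)
{
	struct example_dev *dev = info;

	dev->pending = 0;
}

static void example_kick(struct example_dev *dev, int cpu)
{
	dev->pending = 1;
	dev->csd.func = example_complete;
	dev->csd.info = dev;
	__smp_call_function_single(cpu, &dev->csd, 0);
}
#endif
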
/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif

/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info,
			    bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, me = smp_processor_id();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == me)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == me)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	spin_lock_irqsave(&data->lock, flags);
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(me, data->cpumask);
	data->refs = cpumask_weight(data->cpumask);

	spin_lock(&call_function.lock);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt() will
	 * not miss any other list entries.
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	spin_unlock(&call_function.lock);
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache coherency
	 * rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

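/*
 * Illustrative usage sketch (the callback and mask handling are made-up
 * examples, not part of this file): run a callback on every other online
 * CPU in a caller-supplied mask and wait for all of them to finish.
 */
#if 0
static void example_drain_queue(void *info)
{
	/* runs on each targeted CPU, in IPI context */
}

static void example_many(const struct cpumask *target_mask)
{
	/* preemption must be disabled across smp_call_function_many() */
	preempt_disable();
	smp_call_function_many(target_mask, example_drain_queue, NULL, true);
	preempt_enable();
}
#endif
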
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

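/*
 * Illustrative usage sketch (the callback is a made-up example, not part of
 * this file): have every other online CPU run a fast per-cpu flush and wait
 * until they have all completed it.
 */
#if 0
static void example_flush_local(void *unused)
{
	/* per-cpu work, runs in IPI context on each other online CPU */
}

static void example_all(void)
{
	smp_call_function(example_flush_local, NULL, 1);
}
#endif
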
void ipi_call_lock(void)
{
	spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function.lock);
}