/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT	= 0x01,		/* caller spins until the handler clears it */
	CSD_FLAG_ALLOC	= 0x02,		/* data was kmalloc'ed, free it after use */
	CSD_FLAG_LOCK	= 0x04,		/* per-cpu fallback data is still in flight */
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	struct rcu_head rcu_head;
	unsigned long cpumask_bits[];
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Spin until the handler clears the wait flag */
	while (data->flags & CSD_FLAG_WAIT)
		cpu_relax();
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler (which
	 * locks the list to pull the entry off it) before the IPI is sent,
	 * by the normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to the arch code to make IPIs appear to obey cache
	 * coherency WRT locking and barrier primitives. Generic code isn't
	 * really equipped to do the right thing...
	 */
	/* Only send an IPI if the queue was previously empty */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);
	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may delete
	 * 'pos', since list_del_rcu() doesn't clear ->next.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		/* Only the last CPU to finish pulls the entry off the queue */
		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * Serialize stores to data with the flag clear
			 * and wakeup.
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

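/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical arch-side IPI vector handler would dispatch into the
 * generic handler above with interrupts disabled, roughly like so:
 *
 *	void smp_call_function_ipi_handler(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 *
 * The entry point name and the irq_enter()/irq_exit() bracketing are
 * arch-specific assumptions, not requirements imposed by this file.
 */
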
/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()), so save
		 * the flags away before making the call.
		 */
		data_flags = data->flags;

		data->func(data->info);

		if (data_flags & CSD_FLAG_WAIT) {
			smp_wmb();
			data->flags &= ~CSD_FLAG_WAIT;
		} else if (data_flags & CSD_FLAG_LOCK) {
			smp_wmb();
			data->flags &= ~CSD_FLAG_LOCK;
		} else if (data_flags & CSD_FLAG_ALLOC)
			kfree(data);
	}
}

static DEFINE_PER_CPU(struct call_single_data, csd_data);

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/*
	 * Prevent preemption and rescheduling on another processor,
	 * as well as CPU removal.
	 */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data;

		if (!wait) {
			/*
			 * We are calling a function on a single CPU and
			 * we are not going to wait for it to finish.
			 * We first try to allocate the data, but if we
			 * fail, we fall back to a per-cpu data slot to
			 * pass the information to that CPU. Since all
			 * callers of this code will use the same slot,
			 * we must synchronize the callers to prevent a
			 * new caller from corrupting the data before the
			 * callee has had a chance to use it.
			 *
			 * CSD_FLAG_LOCK is used to let us know when the
			 * IPI handler is done with the data. The first
			 * caller will set it, and the callee will clear
			 * it. The next caller must wait for it to clear
			 * before setting it again. This makes sure the
			 * callee is done with the data before a new
			 * caller uses it.
			 */
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
			else {
				data = &per_cpu(csd_data, me);
				while (data->flags & CSD_FLAG_LOCK)
					cpu_relax();
				data->flags = CSD_FLAG_LOCK;
			}
		} else {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

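/*
 * Usage example (illustrative sketch, not part of the original file);
 * drain_remote() and struct my_stats are hypothetical. The handler runs
 * with interrupts disabled on the target CPU, so it must be fast and
 * must not sleep:
 *
 *	static void drain_remote(void *info)
 *	{
 *		struct my_stats *stats = info;
 *
 *		stats->drained = 1;
 *	}
 *
 *	err = smp_call_function_single(1, drain_remote, stats, 1);
 *	if (err)
 *		printk(KERN_WARNING "CPU 1 not online\n");
 */
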
/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}

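/*
 * Usage example (illustrative sketch, not part of the original file);
 * all names are hypothetical. Embedding the call_single_data in a
 * caller-owned structure avoids any allocation on the fire-and-forget
 * path; ->func, ->info and ->flags must be set before the call:
 *
 *	struct my_request {
 *		struct call_single_data csd;
 *		int arg;
 *	};
 *
 *	static void handle_request(void *info)
 *	{
 *		struct my_request *req = info;
 *		...
 *	}
 *
 *	req->csd.func = handle_request;
 *	req->csd.info = req;
 *	req->csd.flags = 0;
 *	__smp_call_function_single(cpu, &req->csd);
 */
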
/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info,
			    bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == smp_processor_id())
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Slow path: fall back to one IPI per CPU. */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpumask_test_cpu(cpu, mask))
				smp_call_function_single(cpu, func, info, wait);
		}
		return;
	}

	spin_lock_init(&data->lock);
	data->csd.flags = CSD_FLAG_ALLOC;
	if (wait)
		data->csd.flags |= CSD_FLAG_WAIT;
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
	data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/*
	 * Make the list addition visible before sending the IPI.
	 * (IPIs must obey or appear to obey normal Linux cache coherency
	 * rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

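/*
 * Usage example (illustrative sketch, not part of the original file);
 * poke_cpu() and target_mask are hypothetical. Preemption must be
 * disabled around the call and interrupts must be enabled:
 *
 *	preempt_disable();
 *	smp_call_function_many(target_mask, poke_cpu, NULL, false);
 *	preempt_enable();
 */
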
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

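/*
 * Usage example (illustrative sketch, not part of the original file);
 * flush_local_state() is hypothetical. Ask every other online CPU to
 * run the handler and wait until they have all finished. Must be called
 * from process context with interrupts enabled:
 *
 *	smp_call_function(flush_local_state, NULL, 1);
 */
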
/*
 * The ipi_call_lock*() helpers let arch code hold call_function_lock,
 * e.g. while bringing up a CPU, so that no function-call IPIs are queued
 * meanwhile.
 */
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}