/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;

static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
static struct rcu_head rcu_migrate_head[3];
static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
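
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * updater unlinks an element from an RCU-protected list, waits for a
 * grace period, and only then frees it.  The lock "foo_lock", the list
 * membership, and "struct foo" are hypothetical.
 *
 *      void remove_foo(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_del_rcu(&p->list);
 *              spin_unlock(&foo_lock);
 *              synchronize_rcu();      (all pre-existing readers are done)
 *              kfree(p);
 *      }
 *
 * Readers traverse the list under rcu_read_lock()/rcu_read_unlock() and
 * therefore cannot still hold a reference to p once synchronize_rcu()
 * has returned.
 */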

/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
        struct rcu_synchronize rcu;

        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
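
/*
 * Usage sketch (illustrative only, not part of this file): a reader-side
 * section that synchronize_sched() is guaranteed to wait for.  The pointer
 * "shared_cfg" and the helper "use_cfg()" are hypothetical.
 *
 *      rcu_read_lock_sched();          (or preempt_disable())
 *      cfg = rcu_dereference(shared_cfg);
 *      if (cfg)
 *              use_cfg(cfg);
 *      rcu_read_unlock_sched();        (or preempt_enable())
 *
 * Once synchronize_sched() returns, every such section that was running
 * when it was called has finished, so the old "shared_cfg" value may be
 * freed.
 */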

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
        struct rcu_synchronize rcu;

        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
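
/*
 * Usage sketch (illustrative only, not part of this file): an updater
 * publishes a new table and uses synchronize_rcu_bh() before freeing the
 * old one; readers in softirq context bracket their accesses with
 * rcu_read_lock_bh()/rcu_read_unlock_bh().  The names "active_table" and
 * "struct table" are hypothetical.
 *
 *      struct table *old = active_table;
 *
 *      rcu_assign_pointer(active_table, new);
 *      synchronize_rcu_bh();           (all rcu_bh readers of "old" are done)
 *      kfree(old);
 */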

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
        void (*call_rcu_func)(struct rcu_head *head,
                              void (*func)(struct rcu_head *head));

        atomic_inc(&rcu_barrier_cpu_count);
        call_rcu_func = type;
        call_rcu_func(head, rcu_barrier_callback);
}

static inline void wait_migrated_callbacks(void)
{
        wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
        smp_mb(); /* In case we didn't sleep. */
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
                                               void (*func)(struct rcu_head *head)))
{
        BUG_ON(in_interrupt());
        /* Take cpucontrol mutex to protect against CPU hotplug */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        /*
         * Initialize rcu_barrier_cpu_count to 1, then invoke
         * rcu_barrier_func() on each CPU, so that each CPU also has
         * incremented rcu_barrier_cpu_count.  Only then is it safe to
         * decrement rcu_barrier_cpu_count -- otherwise the first CPU
         * might complete its grace period before all of the other CPUs
         * did their increment, causing this function to return too
         * early.
         */
        atomic_set(&rcu_barrier_cpu_count, 1);
        on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
        wait_migrated_callbacks();
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
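
/*
 * Usage sketch (illustrative only, not part of this file): a module that
 * posts call_rcu() callbacks must call rcu_barrier() on unload so that no
 * callback can run after the module text has been freed.  The function
 * "foo_cleanup()" is hypothetical and is assumed to post call_rcu()
 * callbacks that free per-entry memory.
 *
 *      static void __exit foo_exit(void)
 *      {
 *              foo_cleanup();
 *              rcu_barrier();          (wait for the posted callbacks)
 *      }
 *      module_exit(foo_exit);
 */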

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
        _rcu_barrier(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
        _rcu_barrier(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

static void rcu_migrate_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_migrate_type_count))
                wake_up(&rcu_migrate_wq);
}

static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
                unsigned long action, void *hcpu)
{
        rcu_cpu_notify(self, action, hcpu);
        if (action == CPU_DYING) {
                /*
                 * preempt_disable() in on_each_cpu() prevents stop_machine(),
                 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
                 * returns, all online cpus have queued rcu_barrier_func(),
                 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
                 *
                 * These callbacks ensure _rcu_barrier() waits for all
                 * RCU callbacks of the specified type to complete.
                 */
                atomic_set(&rcu_migrate_type_count, 3);
                call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
                call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
                call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
        } else if (action == CPU_DOWN_PREPARE) {
                /* Don't need to wait until next removal operation. */
                /* rcu_migrate_head is protected by cpu_add_remove_lock */
                wait_migrated_callbacks();
        }

        return NOTIFY_OK;
}

void __init rcu_init(void)
{
        int i;

        __rcu_init();
        cpu_notifier(rcu_barrier_cpu_hotplug, 0);

        /*
         * We don't need protection against CPU-hotplug here because
         * this is called early in boot, before either interrupts
         * or the scheduler are operational.
         */
        for_each_online_cpu(i)
                rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
}

void rcu_scheduler_starting(void)
{
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}