X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Frcupdate.c;h=400183346ad22b0e216d21a429295e2cb28b123b;hb=54f4407608c59712a8f5ec1e10dfac40bef5a2e7;hp=c09605f8d16c9d1c0acfb5322f50ce3aabdfe568;hpb=fbf6bfca76d50abef478ba902b8597ecbadfd390;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c09605f..4001833 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -19,7 +19,7 @@
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *	    Manfred Spraul <manfred@colorfullife.com>
- * 
+ *
  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
@@ -27,7 +27,7 @@
  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
  *
  * For detailed explanation of Read-Copy Update mechanism see -
- *		http://lse.sourceforge.net/locking/rcupdate.html
+ *		http://lse.sourceforge.net/locking/rcupdate.html
  *
  */
 #include
@@ -39,28 +39,27 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 
-struct rcu_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
  */
-static void wakeme_after_rcu(struct rcu_head *head)
+void wakeme_after_rcu(struct rcu_head *head)
 {
 	struct rcu_synchronize *rcu;
 
@@ -68,6 +67,8 @@ static void wakeme_after_rcu(struct rcu_head *head)
 	complete(&rcu->completion);
 }
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -81,61 +82,106 @@ void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
 
+	if (!rcu_scheduler_active)
+		return;
+
 	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished */
+	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
-
-	/* Wait for it */
+	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed. These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns. However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API. In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
  */
-static void rcu_barrier_func(void *notused)
+void synchronize_sched(void)
 {
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
 
-	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu(head, rcu_barrier_callback);
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
 }
+EXPORT_SYMBOL_GPL(synchronize_sched);
 
 /**
- * rcu_barrier - Wait until all the in-flight RCUs are complete.
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
  */
-void rcu_barrier(void)
+void synchronize_rcu_bh(void)
 {
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	atomic_set(&rcu_barrier_cpu_count, 0);
-	/*
-	 * The queueing of callbacks in all CPUs must be atomic with
-	 * respect to RCU, otherwise one CPU may queue a callback,
-	 * wait for a grace period, decrement barrier count and call
-	 * complete(), while other CPUs have not yet queued anything.
-	 * So, we need to make sure that grace periods cannot complete
-	 * until all the callbacks are queued.
-	 */
-	rcu_read_lock();
-	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
-	rcu_read_unlock();
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
+static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
+					     unsigned long action, void *hcpu)
+{
+	return rcu_cpu_notify(self, action, hcpu);
 }
-EXPORT_SYMBOL_GPL(rcu_barrier);
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
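
For reference, below is a minimal sketch (not part of the patch) of the completion-based pattern that synchronize_rcu(), synchronize_sched() and synchronize_rcu_bh() above all share: queue a dummy callback with the matching call_rcu*() primitive, then sleep on a completion that the callback signals once a full grace period has elapsed. The container_of() step that falls between the hunks is filled in here, the early-boot short-circuits (rcu_scheduler_active, rcu_blocking_is_gp()) are omitted for brevity, and struct rcu_synchronize is written out inline to keep the sketch self-contained even though the patched tree now takes it from a header.

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>

/* Illustrative condensation of the code above, not a drop-in module. */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Invoked from RCU callback context once the grace period has ended. */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	/* Recover the enclosing rcu_synchronize from its embedded rcu_head. */
	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Queue a callback that cannot run until a full grace period elapses. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Block until that callback has signalled the completion. */
	wait_for_completion(&rcu.completion);
}

The same skeleton yields synchronize_sched() and synchronize_rcu_bh() by substituting call_rcu_sched() or call_rcu_bh() for call_rcu(), which is exactly how the patch structures the three variants.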