X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Frcupdate.c;h=63fe25433980a943096634841d0ce5ff0ed33f7f;hb=a3a2e76c77fa22b114e421ac11dec0c56c3503fb;hp=6addab5e6d8884cb21910965c0dd74f658546664;hpb=15c8b6c1aaaf1c4edd67e2f02e4d8e1bd1a51c0d;p=safe%2Fjmp%2Flinux-2.6 diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 6addab5..63fe254 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -19,7 +19,7 @@ * * Authors: Dipankar Sarma * Manfred Spraul - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: @@ -27,7 +27,7 @@ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - - * http://lse.sourceforge.net/locking/rcupdate.html + * http://lse.sourceforge.net/locking/rcupdate.html * */ #include @@ -39,103 +39,79 @@ #include #include #include -#include #include #include #include #include #include +#include +#include -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key rcu_lock_key; +struct lockdep_map rcu_lock_map = + STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); +EXPORT_SYMBOL_GPL(rcu_lock_map); -static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; -static atomic_t rcu_barrier_cpu_count; -static DEFINE_MUTEX(rcu_barrier_mutex); -static struct completion rcu_barrier_completion; +static struct lock_class_key rcu_bh_lock_key; +struct lockdep_map rcu_bh_lock_map = + STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key); +EXPORT_SYMBOL_GPL(rcu_bh_lock_map); -/* - * Awaken the corresponding synchronize_rcu() instance now that a - * grace period has elapsed. - */ -static void wakeme_after_rcu(struct rcu_head *head) -{ - struct rcu_synchronize *rcu; +static struct lock_class_key rcu_sched_lock_key; +struct lockdep_map rcu_sched_lock_map = + STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key); +EXPORT_SYMBOL_GPL(rcu_sched_lock_map); +#endif - rcu = container_of(head, struct rcu_synchronize, head); - complete(&rcu->completion); -} +int rcu_scheduler_active __read_mostly; +EXPORT_SYMBOL_GPL(rcu_scheduler_active); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC /** - * synchronize_rcu - wait until a grace period has elapsed. + * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? * - * Control will return to the caller some time after a full grace - * period has elapsed, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. + * Check for bottom half being disabled, which covers both the + * CONFIG_PROVE_RCU and not cases. Note that if someone uses + * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) + * will show the situation. + * + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 
*/ -void synchronize_rcu(void) +int rcu_read_lock_bh_held(void) { - struct rcu_synchronize rcu; - - init_completion(&rcu.completion); - /* Will wake me after RCU finished */ - call_rcu(&rcu.head, wakeme_after_rcu); - - /* Wait for it */ - wait_for_completion(&rcu.completion); + if (!debug_lockdep_rcu_enabled()) + return 1; + return in_softirq(); } -EXPORT_SYMBOL_GPL(synchronize_rcu); +EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); -static void rcu_barrier_callback(struct rcu_head *notused) -{ - if (atomic_dec_and_test(&rcu_barrier_cpu_count)) - complete(&rcu_barrier_completion); -} +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* - * Called with preemption disabled, and from cross-cpu IRQ context. + * This function is invoked towards the end of the scheduler's initialization + * process. Before this is called, the idle task might contain + * RCU read-side critical sections (during which time, this idle + * task is booting the system). After this function is called, the + * idle tasks are prohibited from containing RCU read-side critical + * sections. */ -static void rcu_barrier_func(void *notused) +void rcu_scheduler_starting(void) { - int cpu = smp_processor_id(); - struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); - - atomic_inc(&rcu_barrier_cpu_count); - call_rcu(head, rcu_barrier_callback); + WARN_ON(num_online_cpus() != 1); + WARN_ON(nr_context_switches() > 0); + rcu_scheduler_active = 1; } -/** - * rcu_barrier - Wait until all the in-flight RCUs are complete. +/* + * Awaken the corresponding synchronize_rcu() instance now that a + * grace period has elapsed. */ -void rcu_barrier(void) +void wakeme_after_rcu(struct rcu_head *head) { - BUG_ON(in_interrupt()); - /* Take cpucontrol mutex to protect against CPU hotplug */ - mutex_lock(&rcu_barrier_mutex); - init_completion(&rcu_barrier_completion); - atomic_set(&rcu_barrier_cpu_count, 0); - /* - * The queueing of callbacks in all CPUs must be atomic with - * respect to RCU, otherwise one CPU may queue a callback, - * wait for a grace period, decrement barrier count and call - * complete(), while other CPUs have not yet queued anything. - * So, we need to make sure that grace periods cannot complete - * until all the callbacks are queued. - */ - rcu_read_lock(); - on_each_cpu(rcu_barrier_func, NULL, 1); - rcu_read_unlock(); - wait_for_completion(&rcu_barrier_completion); - mutex_unlock(&rcu_barrier_mutex); -} -EXPORT_SYMBOL_GPL(rcu_barrier); + struct rcu_synchronize *rcu; -void __init rcu_init(void) -{ - __rcu_init(); + rcu = container_of(head, struct rcu_synchronize, head); + complete(&rcu->completion); } -
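
Illustrative usage sketch (not part of the diff above, and hedged as such): the hunks
above turn kernel/rcupdate.c into flavor-independent support code -- lockdep maps for
the three kinds of RCU read-side critical section, rcu_read_lock_bh_held(),
rcu_scheduler_starting(), and a now non-static wakeme_after_rcu() -- while
synchronize_rcu() and rcu_barrier() are removed from this file. The sketch below shows
the caller-side pattern those primitives exist to support, using only long-standing RCU
APIs (rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), call_rcu()). The names
my_data, my_ptr, my_lock and the three functions are invented for this example and do
not appear in the kernel.

/*
 * Example only: an RCU-protected pointer to a single heap object.
 * Readers run lockless inside rcu_read_lock()/rcu_read_unlock();
 * updaters serialize among themselves with a spinlock, publish the
 * new copy with rcu_assign_pointer(), and defer freeing of the old
 * copy with call_rcu() -- the same callback mechanism that
 * wakeme_after_rcu() rides on to implement synchronize_rcu().
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct my_data {
	int value;
	struct rcu_head rcu;		/* for deferred freeing via call_rcu() */
};

static struct my_data *my_ptr;		/* RCU-protected pointer */
static DEFINE_SPINLOCK(my_lock);	/* serializes updaters only */

/* Reader: may run concurrently with an update. */
static int my_read_value(void)
{
	struct my_data *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(my_ptr);
	if (p)
		val = p->value;
	rcu_read_unlock();
	return val;
}

/* RCU callback: runs after a grace period, once no reader can still see "old". */
static void my_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_data, rcu));
}

/* Updater: publish a new copy, then defer freeing of the old one. */
static int my_update_value(int value)
{
	struct my_data *new, *old;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->value = value;

	spin_lock(&my_lock);
	old = my_ptr;
	rcu_assign_pointer(my_ptr, new);	/* publish with the needed barrier */
	spin_unlock(&my_lock);

	if (old)
		call_rcu(&old->rcu, my_free_rcu);
	return 0;
}

Two notes on the design this reflects: RCU only coordinates readers against
reclamation, so concurrent updaters still need their own lock (my_lock above); and an
updater that can sleep may call synchronize_rcu() and free the old copy directly
instead of using call_rcu() -- synchronize_rcu() is built on exactly the
wakeme_after_rcu() completion shown in this file.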