X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Frcupdate.h;h=d8fb2abcf30339dd5a78f922452d3b8a0b91f034;hb=da848c47bc6e873a54a445ea1960423a495b6b32;hp=c2ec6c77874eacc0cd7a340b7acc7c386aea0ead;hpb=21a1ea9eb40411d4ee29448c53b9e4c0654d6ceb;p=safe%2Fjmp%2Flinux-2.6
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c2ec6c7..d8fb2ab 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1,5 +1,5 @@
 /*
- * Read-Copy Update mechanism for mutual exclusion 
+ * Read-Copy Update mechanism for mutual exclusion
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,32 +15,35 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
- * 
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
  *
  * For detailed explanation of Read-Copy Update mechanism see -
- * 		http://lse.sourceforge.net/locking/rcupdate.html
+ *		http://lse.sourceforge.net/locking/rcupdate.html
  *
  */

 #ifndef __LINUX_RCUPDATE_H
 #define __LINUX_RCUPDATE_H

-#ifdef __KERNEL__
-
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/lockdep.h>
+#include <linux/completion.h>
+
+#ifdef CONFIG_RCU_TORTURE_TEST
+extern int rcutorture_runnable; /* for sysctl */
+#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

 /**
  * struct rcu_head - callback structure for use with RCU
@@ -52,88 +55,222 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };

-#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
+/* Exported common interfaces */
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern int rcu_scheduler_active;
+extern void rcu_scheduler_starting(void);
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_TINY_RCU)
+#include <linux/rcutiny.h>
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif
+
+#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
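
/*
 * Editor's illustration (not part of the patch above): a minimal sketch of
 * how a client of this header embeds struct rcu_head in its own
 * RCU-protected object, using the initializers just shown.  "struct foo"
 * and "default_foo" are hypothetical names.
 */
struct foo {
	int data;
	struct rcu_head rcu;	/* handed to call_rcu() when freeing */
};

static struct foo default_foo = {
	.data = 0,
	.rcu  = RCU_HEAD_INIT,	/* static initialization of the callback head */
};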

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long	cur;		/* Current batch number. */
-	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

-	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	cpumask_t	cpumask; /* CPUs that need to switch in order */
-			 /* for current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
+extern int debug_lockdep_rcu_enabled(void);

-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ */
+static inline int rcu_read_lock_held(void)
 {
-	return (a - b) > 0;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }

 /*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
  */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long		quiescbatch;	/* Batch # for grace period */
-	int		passed_quiesc;	/* User-mode/idle loop etc. */
-	int		qs_pending;	/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	long		batch;		/* Batch # for current RCU batch */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
-	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long		blimit;		/* Upper limit on a processed batch */
-	int		cpu;
-	struct rcu_head barrier;
-#ifdef CONFIG_SMP
-	long		last_rs_qlen;	/* qlen during the last resched */
-#endif
-};
-
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-extern struct rcu_ctrlblk rcu_ctrlblk;
-extern struct rcu_ctrlblk rcu_bh_ctrlblk;
+extern int rcu_read_lock_bh_held(void);

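/*
 * Editor's sketch (not from the patch): rcu_read_lock_held() is intended
 * for debug assertions such as the one below, not for control flow.
 * "gbl_foo", "struct foo" and get_gbl_foo() are hypothetical names;
 * rcu_dereference_raw() is defined further down in this header.
 */
extern struct foo *gbl_foo;

static inline struct foo *get_gbl_foo(void)
{
	WARN_ON_ONCE(!rcu_read_lock_held());	/* caller must be in an RCU read-side section */
	return rcu_dereference_raw(gbl_foo);
}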
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
-static inline void rcu_qsctr_inc(int cpu)
+#ifdef CONFIG_PREEMPT
+static inline int rcu_read_lock_sched_held(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->passed_quiesc = 1;
+	int lockdep_opinion = 0;
+
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	if (debug_locks)
+		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
-static inline void rcu_bh_qsctr_inc(int cpu)
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
+	return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

-extern int rcu_pending(int cpu);
+# define rcu_read_acquire()		do { } while (0)
+# define rcu_read_release()		do { } while (0)
+# define rcu_read_acquire_bh()		do { } while (0)
+# define rcu_read_release_bh()		do { } while (0)
+# define rcu_read_acquire_sched()	do { } while (0)
+# define rcu_read_release_sched()	do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+	return 1;
+}
+
+#ifdef CONFIG_PREEMPT
+static inline int rcu_read_lock_sched_held(void)
+{
+	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+extern int rcu_my_thread_group_empty(void);
+
+#define __do_rcu_dereference_check(c) \
+	do { \
+		static bool __warned; \
+		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+			__warned = true; \
+			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		} \
+	} while (0)
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct.  Typically the conditions indicate
+ * the various locking conditions that should be held at that point.  The check
+ * should return true if the conditions are satisfied.
+ *
+ * For example:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock));
+ *
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either the RCU read lock is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock) ||
+ *					      atomic_read(&foo->usage) == 0);
+ */
+#define rcu_dereference_check(p, c) \
+	({ \
+		__do_rcu_dereference_check(c); \
+		rcu_dereference_raw(p); \
+	})
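
/*
 * Editor's sketch (not from the patch): the rcu_dereference_check() example
 * from the comment above, written out as a helper.  The types, the ->bar
 * pointer and the ->lock spinlock are hypothetical and local to this sketch.
 */
struct bar { int x; };
struct foo {
	struct bar *bar;	/* RCU-protected; replaced only under ->lock */
	spinlock_t lock;
};

static inline struct bar *foo_get_bar(struct foo *foo)
{
	/* legal either inside rcu_read_lock() or with foo->lock held */
	return rcu_dereference_check(foo->bar,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&foo->lock));
}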
+
+/**
+ * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing.  Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ */
+#define rcu_dereference_protected(p, c) \
+	({ \
+		__do_rcu_dereference_check(c); \
+		(p); \
+	})
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)
+#define rcu_dereference_protected(p, c) (p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/**
+ * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  This may also be used in cases where update-side locks prevent
+ * the value of the pointer from changing, but rcu_dereference_protected()
+ * is a lighter-weight primitive for this use case.
+ */
+#define rcu_access_pointer(p)	ACCESS_ONCE(p)

 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
@@ -164,14 +301,12 @@ extern int rcu_pending(int cpu);
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock()		preempt_disable()
-
-/**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
- *
- * See rcu_read_lock() for more information.
- */
-#define rcu_read_unlock()	preempt_enable()
+static inline void rcu_read_lock(void)
+{
+	__rcu_read_lock();
+	__acquire(RCU);
+	rcu_read_acquire();
+}

 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
@@ -184,6 +319,18 @@ extern int rcu_pending(int cpu);
  */

 /**
+ * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ *
+ * See rcu_read_lock() for more information.
+ */
+static inline void rcu_read_unlock(void)
+{
+	rcu_read_release();
+	__release(RCU);
+	__rcu_read_unlock();
+}
+
+/**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
  *
  * This is equivalent of rcu_read_lock(), but to be used when updates
@@ -194,32 +341,112 @@ extern int rcu_pending(int cpu);
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh()	local_bh_disable()
+static inline void rcu_read_lock_bh(void)
+{
+	__rcu_read_lock_bh();
+	__acquire(RCU_BH);
+	rcu_read_acquire_bh();
+}

 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  *
  * See rcu_read_lock_bh() for more information.
  */
-#define rcu_read_unlock_bh()	local_bh_enable()
+static inline void rcu_read_unlock_bh(void)
+{
+	rcu_read_release_bh();
+	__release(RCU_BH);
+	__rcu_read_unlock_bh();
+}
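
/*
 * Editor's sketch (not from the patch): the canonical read side built from
 * the primitives above.  "gbl_foo" and "struct foo" are the hypothetical
 * names used in the earlier sketches; rcu_dereference() is defined later
 * in this header.
 */
static inline int read_foo_data(void)
{
	int val;

	rcu_read_lock();			/* begin read-side critical section */
	val = rcu_dereference(gbl_foo)->data;	/* pointer stays valid until unlock */
	rcu_read_unlock();			/* end section; no blocking in between */
	return val;
}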

 /**
- * rcu_dereference - fetch an RCU-protected pointer in an
- * RCU read-side critical section.  This pointer may later
- * be safely dereferenced.
+ * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ *
+ * Should be used with either
+ * - synchronize_sched()
+ * or
+ * - call_rcu_sched() and rcu_barrier_sched()
+ *  on the write-side to insure proper synchronization.
+ */
+static inline void rcu_read_lock_sched(void)
+{
+	preempt_disable();
+	__acquire(RCU_SCHED);
+	rcu_read_acquire_sched();
+}
+
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+	preempt_disable_notrace();
+	__acquire(RCU_SCHED);
+}
+
+/*
+ * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+static inline void rcu_read_unlock_sched(void)
+{
+	rcu_read_release_sched();
+	__release(RCU_SCHED);
+	preempt_enable();
+}
+
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+	__release(RCU_SCHED);
+	preempt_enable_notrace();
+}
+
+
+/**
+ * rcu_dereference_raw - fetch an RCU-protected pointer
+ *
+ * The caller must be within some flavor of RCU read-side critical
+ * section, or must be otherwise preventing the pointer from changing,
+ * for example, by holding an appropriate lock.  This pointer may later
+ * be safely dereferenced.  It is the caller's responsibility to have
+ * done the right thing, as this primitive does no checking of any kind.
+ *
  * Inserts memory barriers on architectures that require them
  * (currently only the Alpha), and, more importantly, documents
  * exactly which pointers are protected by RCU.
  */
-
-#define rcu_dereference(p)     ({ \
-				typeof(p) _________p1 = p; \
+#define rcu_dereference_raw(p)	({ \
+				typeof(p) _________p1 = ACCESS_ONCE(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})

 /**
+ * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference(p) \
+	rcu_dereference_check(p, rcu_read_lock_held())
+
+/**
+ * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) \
+		rcu_dereference_check(p, rcu_read_lock_bh_held())
+
+/**
+ * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_sched(p) \
+		rcu_dereference_check(p, rcu_read_lock_sched_held())
+
+/**
  * rcu_assign_pointer - assign (publicize) a pointer to a newly
  * initialized structure that will be dereferenced by RCU read-side
  * critical sections.  Returns the value assigned.
@@ -232,43 +459,56 @@ extern int rcu_pending(int cpu);
  * code.
  */

-#define rcu_assign_pointer(p, v)	({ \
-						smp_wmb(); \
-						(p) = (v); \
-					})
+#define rcu_assign_pointer(p, v) \
+	({ \
+		if (!__builtin_constant_p(v) || \
+		    ((v) != NULL)) \
+			smp_wmb(); \
+		(p) = (v); \
+	})
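
/*
 * Editor's sketch (not from the patch): the matching update side.  A new
 * version is published with rcu_assign_pointer() and the old one reclaimed
 * only after a grace period.  "gbl_foo", "foo_lock" and "struct foo" are
 * the hypothetical names from the earlier sketches.
 */
extern spinlock_t foo_lock;		/* serializes updaters, not readers */

static void update_foo(int new_data)
{
	struct foo *new_fp, *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return;
	new_fp->data = new_data;
	spin_lock(&foo_lock);
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);	/* readers now see the new version */
	spin_unlock(&foo_lock);
	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(old_fp);
}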
+
+/* Infrastructure to implement the synchronize_() primitives. */
+
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
+extern void wakeme_after_rcu(struct rcu_head *head);

 /**
- * synchronize_sched - block until all CPUs have exited any non-preemptive
- * kernel code sequences.
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (deprecated)
- * synchronize_kernel() API.  In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
  */
-#define synchronize_sched() synchronize_rcu()
+extern void call_rcu(struct rcu_head *head,
+		     void (*func)(struct rcu_head *head));

-extern void rcu_init(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/* Exported interfaces */
-extern void FASTCALL(call_rcu(struct rcu_head *head,
-			void (*func)(struct rcu_head *head)));
-extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
-			void (*func)(struct rcu_head *head)));
-extern __deprecated_for_modules void synchronize_kernel(void);
-extern void synchronize_rcu(void);
-void synchronize_idle(void);
-extern void rcu_barrier(void);
+/**
+ * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
+ *  OR
+ *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *  These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));

-#endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
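
/*
 * Editor's sketch (not from the patch): the non-blocking alternative to the
 * synchronize_rcu()/kfree() pair in the update-side sketch above, using
 * call_rcu() as documented.  container_of() recovers the enclosing
 * (hypothetical) struct foo from its embedded rcu_head.
 */
static void free_foo_rcu(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

/* At the update site, instead of synchronize_rcu(); kfree(old_fp);	*/
/*	call_rcu(&old_fp->rcu, free_foo_rcu);				*/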