/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL
/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};
#ifdef CONFIG_LOCK_STAT

struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);

#endif /* CONFIG_LOCK_STAT */
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 of the pointer is reused to indicate whether the lock has
	 * been visited during BFS.
	 */
	struct lock_list		*parent;
};
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
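/*
 * Example (a minimal illustrative sketch; "my_key" and "my_dep_map" are
 * hypothetical names, not part of this header): a statically allocated
 * map can be used to teach the validator about an ordering rule that is
 * not backed by a real lock object:
 *
 *	static struct lock_class_key my_key;
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_key);
 */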
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
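/*
 * Example (illustrative; "child" and "my_nested_lock_key" are
 * hypothetical): locks initialized by one shared helper all land in the
 * same class. If one instance obeys different ordering rules, split it
 * into its own class, keeping a readable name for reports:
 *
 *	static struct lock_class_key my_nested_lock_key;
 *
 *	mutex_init(&child->mutex);
 *	lockdep_set_class_and_name(&child->mutex, &my_nested_lock_key,
 *				   "child->mutex");
 */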
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
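/*
 * Example (illustrative; names are hypothetical): asserting that a lock
 * still carries the class this subsystem assigned to it earlier:
 *
 *	WARN_ON(!lockdep_match_class(&dev->lock, &my_dev_lock_key));
 */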
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
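/*
 * Example (illustrative; "sem" is a hypothetical rwsem-like object):
 * the per-primitive annotations further down map onto these arguments.
 * A down_read()-style acquire with full validation is read=1 (reads do
 * not recurse on the same instance) and check=2:
 *
 *	lock_acquire(&sem->dep_map, 0, 0, 1, 2, NULL, _RET_IP_);
 */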
extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
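/*
 * Example (illustrative; "my_dev" is hypothetical): a helper whose
 * contract requires the caller to hold dev->mutex can enforce that
 * contract whenever lockdep is enabled, at no cost otherwise:
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->mutex);
 *		dev->state = MY_DEV_READY;
 *	}
 */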
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() in the
 * !LOCKDEP case, since the result is not well defined; callers should
 * #ifdef the call sites themselves.
 */
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)	do { } while (0)

#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)	do { } while (0)
#define lock_acquired(lockdep_map, ip)	do { } while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
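/*
 * Example (illustrative; "my_trylock" and "my_lock_slowpath" are
 * hypothetical operations on the lock type): a primitive's slowpath can
 * route its acquire through this helper so that lock statistics record
 * contention and acquisition. Because the trylock runs first, an
 * uncontended acquire emits only the lock_acquired() event:
 *
 *	LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
 */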
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
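/*
 * Example (illustrative; "parent" and "child" are hypothetical): taking
 * two locks of the same class in a well-defined order by pushing the
 * inner acquire into subclass 1, so the validator does not flag it as
 * a self-deadlock:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */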
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock)		do { } while (0)
# define might_lock_read(lock)		do { } while (0)
#endif
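/*
 * Example (illustrative; "dev" and "need_teardown" are hypothetical):
 * a function that takes a lock only on a rare path can still let the
 * validator see the potential dependency on every call:
 *
 *	might_lock(&dev->mutex);
 *	if (unlikely(need_teardown))
 *		mutex_lock(&dev->mutex);
 */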
#endif /* __LINUX_LOCKDEP_H */