lockdep: Improve implementation of BFS

/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
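
/*
 * For illustration: assuming lockdep_states.h defines the states
 * HARDIRQ, SOFTIRQ and RECLAIM_FS (as it does at the time of this
 * change), the enum above expands to
 *
 *	LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ, LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ, LOCK_ENABLED_SOFTIRQ_READ,
 *	LOCK_USED_IN_RECLAIM_FS, LOCK_USED_IN_RECLAIM_FS_READ,
 *	LOCK_ENABLED_RECLAIM_FS, LOCK_ENABLED_RECLAIM_FS_READ,
 *	LOCK_USED,
 *	LOCK_USAGE_STATES
 */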

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
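
/*
 * A minimal usage sketch (assumed, not taken from this file): the
 * irq-safety checks test a class's accumulated usage_mask against
 * these composite masks, e.g.
 *
 *	if (class->usage_mask & LOCKF_USED_IN_IRQ)
 *		...	class has been used in hardirq or softirq context
 */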

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency lists in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency list (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
extern atomic_t chain_lookup_hits;
extern atomic_t chain_lookup_misses;
extern atomic_t hardirqs_on_events;
extern atomic_t hardirqs_off_events;
extern atomic_t redundant_hardirqs_on;
extern atomic_t redundant_hardirqs_off;
extern atomic_t softirqs_on_events;
extern atomic_t softirqs_off_events;
extern atomic_t redundant_softirqs_on;
extern atomic_t redundant_softirqs_off;
extern atomic_t nr_unused_locks;
extern atomic_t nr_cyclic_checks;
extern atomic_t nr_cyclic_check_recursions;
extern atomic_t nr_find_usage_forwards_checks;
extern atomic_t nr_find_usage_forwards_recursions;
extern atomic_t nr_find_usage_backwards_checks;
extern atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)		atomic_inc(ptr)
# define debug_atomic_dec(ptr)		atomic_dec(ptr)
# define debug_atomic_read(ptr)		atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif
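
/*
 * Usage sketch (assumed, matching how lockdep.c bumps these counters):
 * callers increment the statistics unconditionally; when
 * CONFIG_DEBUG_LOCKDEP is off the macros compile to nothing, e.g.
 *
 *	debug_atomic_inc(&chain_lookup_hits);
 */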

extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
extern unsigned long bfs_accessed[];

/*
 * The circular queue size is a power of 2, so the wrap-around index
 * arithmetic below can use a cheap bitwise AND instead of a modulo:
 * x & (MAX_CIRCULAR_QUE_SIZE - 1) == x % MAX_CIRCULAR_QUE_SIZE.
 */
#define MAX_CIRCULAR_QUE_SIZE	4096UL

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock, if there is a circular dependency
 * between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
	unsigned int  front, rear;
};

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & (MAX_CIRCULAR_QUE_SIZE - 1)) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & (MAX_CIRCULAR_QUE_SIZE - 1);
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & (MAX_CIRCULAR_QUE_SIZE - 1);
	return 0;
}

static inline int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & (MAX_CIRCULAR_QUE_SIZE - 1);
}

/*
 * Mark a dependency entry as visited by the current BFS walk and
 * record which entry we reached it from, so that the resulting
 * path can be reconstructed by following the ->parent pointers.
 */
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	set_bit(nr, bfs_accessed);
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return test_bit(nr, bfs_accessed);
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

/*
 * Return the distance of @child from the root of the BFS walk,
 * i.e. the length of its ->parent chain.
 */
static inline unsigned long get_lock_depth(struct lock_list *child)
{
	unsigned long depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}
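
/*
 * Illustrative only: a minimal sketch of how the queue and the
 * accessed-bitmap helpers above combine into a breadth-first walk of
 * the forward dependency graph. The function name __bfs_sketch, the
 * match() callback and the error handling are assumptions made for
 * this example; the real search in kernel/lockdep.c differs in its
 * details.
 */
static inline struct lock_list *
__bfs_sketch(struct circular_queue *cq, struct lock_list *source,
	     int (*match)(struct lock_list *entry))
{
	struct lock_list *entry;
	unsigned long ptr;

	__cq_init(cq);		/* also clears the bfs_accessed bitmap */
	mark_lock_accessed(source, NULL);	/* the root has no parent */
	if (__cq_enqueue(cq, (unsigned long)source))
		return NULL;			/* queue overflow */

	while (!__cq_dequeue(cq, &ptr)) {
		struct lock_list *lock = (struct lock_list *)ptr;

		/* scan the dependencies recorded after this lock */
		list_for_each_entry(entry, &lock->class->locks_after, entry) {
			if (lock_accessed(entry))
				continue;
			/* remember the path for later reconstruction */
			mark_lock_accessed(entry, lock);
			if (match(entry))
				return entry;	/* walk ->parent for the path */
			if (__cq_enqueue(cq, (unsigned long)entry))
				return NULL;	/* queue overflow */
		}
	}
	return NULL;
}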