lockdep: convert debug statistics to per-cpu counters

Replace the atomic_t lockdep event counters with plain per-cpu counters to avoid cache-line bouncing on the fast path, and drop the BFS circular-queue helpers from this header.
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c2f6594..4f560cf 100644
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
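The first hunk exports max_bfs_queue_depth, a high-water mark for the circular queue that lockdep's breadth-first dependency search uses (the queue helpers themselves are deleted from this header in the hunk below). Here is a minimal, hypothetical userspace sketch of such a counter paired with a power-of-2 circular queue; the names mirror the removed helpers, but this is an illustration, not the kernel's code:

/*
 * Hypothetical standalone sketch (not kernel code): track the peak
 * occupancy of a power-of-2 circular queue the way a counter like
 * max_bfs_queue_depth would for the BFS walker.
 */
#include <stdio.h>

#define MAX_CIRCULAR_QUE_SIZE 4096UL	/* power of 2: "% size" becomes "& (size - 1)" */

struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
	unsigned int front, rear;
};

static unsigned int max_bfs_queue_depth;

static void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
}

static int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (((cq->rear + 1) & (MAX_CIRCULAR_QUE_SIZE - 1)) == cq->front)
		return -1;	/* queue full */
	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & (MAX_CIRCULAR_QUE_SIZE - 1);
	return 0;
}

static unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & (MAX_CIRCULAR_QUE_SIZE - 1);
}

int main(void)
{
	struct circular_queue cq;
	unsigned long i;

	__cq_init(&cq);
	for (i = 0; i < 100; i++) {
		if (__cq_enqueue(&cq, i))
			break;
		/* Record the high-water mark after each enqueue. */
		if (__cq_get_elem_count(&cq) > max_bfs_queue_depth)
			max_bfs_queue_depth = __cq_get_elem_count(&cq);
	}
	printf("max queue depth: %u\n", max_bfs_queue_depth);
	return 0;
}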
@@ -108,125 +110,61 @@ lockdep_count_backward_deps(struct lock_class *class)
 #endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
+
+#include <asm/local.h>
 /*
- * Various lockdep statistics:
+ * Various lockdep statistics.
+ * We want them per cpu as they are often accessed in fast path
+ * and we want to avoid too much cache bouncing.
  */
-extern atomic_t chain_lookup_hits;
-extern atomic_t chain_lookup_misses;
-extern atomic_t hardirqs_on_events;
-extern atomic_t hardirqs_off_events;
-extern atomic_t redundant_hardirqs_on;
-extern atomic_t redundant_hardirqs_off;
-extern atomic_t softirqs_on_events;
-extern atomic_t softirqs_off_events;
-extern atomic_t redundant_softirqs_on;
-extern atomic_t redundant_softirqs_off;
-extern atomic_t nr_unused_locks;
-extern atomic_t nr_cyclic_checks;
-extern atomic_t nr_cyclic_check_recursions;
-extern atomic_t nr_find_usage_forwards_checks;
-extern atomic_t nr_find_usage_forwards_recursions;
-extern atomic_t nr_find_usage_backwards_checks;
-extern atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)         atomic_inc(ptr)
-# define debug_atomic_dec(ptr)         atomic_dec(ptr)
-# define debug_atomic_read(ptr)                atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)         do { } while (0)
-# define debug_atomic_dec(ptr)         do { } while (0)
-# define debug_atomic_read(ptr)                0
-#endif
-
-
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/* For efficiency of the modulo operation, we use a power of 2 */
-#define  MAX_CIRCULAR_QUE_SIZE     4096UL
-
-/* The circular_queue and helpers are used to implement the
- * breadth-first search (BFS) algorithm, by which we can build
- * the shortest path from the next lock to be acquired to a
- * previously held lock if there is a circular dependency between them.
- */
-struct circular_queue{
-       unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-       unsigned int  front, rear;
+struct lockdep_stats {
+       int     chain_lookup_hits;
+       int     chain_lookup_misses;
+       int     hardirqs_on_events;
+       int     hardirqs_off_events;
+       int     redundant_hardirqs_on;
+       int     redundant_hardirqs_off;
+       int     softirqs_on_events;
+       int     softirqs_off_events;
+       int     redundant_softirqs_on;
+       int     redundant_softirqs_off;
+       int     nr_unused_locks;
+       int     nr_cyclic_checks;
+       int     nr_cyclic_check_recursions;
+       int     nr_find_usage_forwards_checks;
+       int     nr_find_usage_forwards_recursions;
+       int     nr_find_usage_backwards_checks;
+       int     nr_find_usage_backwards_recursions;
 };
 
-static inline void __cq_init(struct circular_queue *cq)
-{
-       cq->front = cq->rear = 0;
-       bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-       return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-       return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-       if (__cq_full(cq))
-               return -1;
-
-       cq->element[cq->rear] = elem;
-       cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-       return 0;
-}
+DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-       if (__cq_empty(cq))
-               return -1;
+#define __debug_atomic_inc(ptr)                                        \
+       this_cpu_inc(lockdep_stats.ptr);
 
-       *elem = cq->element[cq->front];
-       cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-       return 0;
+#define debug_atomic_inc(ptr)                  {               \
+       WARN_ON_ONCE(!irqs_disabled());                         \
+       __this_cpu_inc(lockdep_stats.ptr);                      \
 }
 
-static inline int __cq_get_elem_count(struct circular_queue *cq)
-{
-       return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-                                       struct lock_list *parent)
-{
-       unsigned long nr;
-       nr = lock - list_entries;
-       WARN_ON(nr >= nr_list_entries);
-       lock->parent = parent;
-       set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-       unsigned long nr;
-       nr = lock - list_entries;
-       WARN_ON(nr >= nr_list_entries);
-       return test_bit(nr, bfs_accessed);
+#define debug_atomic_dec(ptr)                  {               \
+       WARN_ON_ONCE(!irqs_disabled());                         \
+       __this_cpu_dec(lockdep_stats.ptr);                      \
 }
 
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-       return child->parent;
-}
-
-static inline unsigned long get_lock_depth(struct lock_list *child)
-{
-       unsigned long depth = 0;
-       struct lock_list *parent;
-
-       while ((parent = get_lock_parent(child))) {
-               child = parent;
-               depth++;
-       }
-       return depth;
-}
+#define debug_atomic_read(ptr)         ({                              \
+       struct lockdep_stats *__cpu_lockdep_stats;                      \
+       unsigned long long __total = 0;                                 \
+       int __cpu;                                                      \
+       for_each_possible_cpu(__cpu) {                                  \
+               __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
+               __total += __cpu_lockdep_stats->ptr;                    \
+       }                                                               \
+       __total;                                                        \
+})
+#else
+# define __debug_atomic_inc(ptr)       do { } while (0)
+# define debug_atomic_inc(ptr)         do { } while (0)
+# define debug_atomic_dec(ptr)         do { } while (0)
+# define debug_atomic_read(ptr)                0
+#endif
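The replacement drops atomic_t in favor of plain per-cpu integers: the write side uses __this_cpu_inc() with interrupts disabled (hence the WARN_ON_ONCE), and debug_atomic_read() folds every possible CPU's slot at read time. The sketch below is a hypothetical userspace analogue of that pattern, using plain C with an explicit NR_CPUS array standing in for the per-cpu area; inc_chain_lookup_hits() and read_chain_lookup_hits() are illustrative names, not kernel APIs:

/*
 * Hypothetical userspace analogue of the per-cpu counter scheme above
 * (plain C, no kernel APIs): each "cpu" bumps its own cache-line-sized
 * slot with no atomics, and the read side sums all slots, much like
 * debug_atomic_read() does with for_each_possible_cpu().
 */
#include <stdio.h>

#define NR_CPUS 4	/* illustrative; the kernel iterates possible CPUs */

struct lockdep_stats {
	int chain_lookup_hits;
	/* ... one plain int per event, as in the patch ... */
} __attribute__((aligned(64)));	/* one cache line per cpu to avoid bouncing */

static struct lockdep_stats stats[NR_CPUS];

/* Write side: a cpu touches only its own slot, so no atomic RMW is needed. */
static void inc_chain_lookup_hits(int cpu)
{
	stats[cpu].chain_lookup_hits++;
}

/* Read side: fold every cpu's slot, mirroring debug_atomic_read(). */
static unsigned long long read_chain_lookup_hits(void)
{
	unsigned long long total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += stats[cpu].chain_lookup_hits;
	return total;
}

int main(void)
{
	inc_chain_lookup_hits(0);
	inc_chain_lookup_hits(2);
	inc_chain_lookup_hits(2);
	printf("chain_lookup_hits = %llu\n", read_chain_lookup_hits());
	return 0;
}

The write side gets away without atomics because debug_atomic_inc() is only used with interrupts disabled, so the task can neither migrate to another CPU nor be preempted mid-update; the read side tolerates a little tearing, since these totals are debugging statistics rather than invariants.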