X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Flockdep_internals.h;h=4f560cfedc8fd038c4dd09bfb590a9932fbd1193;hb=ff9da691c0498ff81fdd014e7a0731dab2337dac;hp=6f48d37d5be28feb1e54d216852a8dd340dbbb7c;hpb=c94aa5ca3088018d2a7a9bd3258aefffe29df265;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6f48d37..4f560cf 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -108,114 +110,61 @@ lockdep_count_backward_deps(struct lock_class *class)
 #endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
+
+#include 
 /*
- * Various lockdep statistics:
+ * Various lockdep statistics.
+ * We want them per cpu as they are often accessed in fast path
+ * and we want to avoid too much cache bouncing.
  */
-extern atomic_t chain_lookup_hits;
-extern atomic_t chain_lookup_misses;
-extern atomic_t hardirqs_on_events;
-extern atomic_t hardirqs_off_events;
-extern atomic_t redundant_hardirqs_on;
-extern atomic_t redundant_hardirqs_off;
-extern atomic_t softirqs_on_events;
-extern atomic_t softirqs_off_events;
-extern atomic_t redundant_softirqs_on;
-extern atomic_t redundant_softirqs_off;
-extern atomic_t nr_unused_locks;
-extern atomic_t nr_cyclic_checks;
-extern atomic_t nr_cyclic_check_recursions;
-extern atomic_t nr_find_usage_forwards_checks;
-extern atomic_t nr_find_usage_forwards_recursions;
-extern atomic_t nr_find_usage_backwards_checks;
-extern atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)		atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)		do { } while (0)
-# define debug_atomic_dec(ptr)		do { } while (0)
-# define debug_atomic_read(ptr)		0
-#endif
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-#define MAX_CIRCULAR_QUE_SIZE		4096UL
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int front, rear;
+struct lockdep_stats {
+	int	chain_lookup_hits;
+	int	chain_lookup_misses;
+	int	hardirqs_on_events;
+	int	hardirqs_off_events;
+	int	redundant_hardirqs_on;
+	int	redundant_hardirqs_off;
+	int	softirqs_on_events;
+	int	softirqs_off_events;
+	int	redundant_softirqs_on;
+	int	redundant_softirqs_off;
+	int	nr_unused_locks;
+	int	nr_cyclic_checks;
+	int	nr_cyclic_check_recursions;
+	int	nr_find_usage_forwards_checks;
+	int	nr_find_usage_forwards_recursions;
+	int	nr_find_usage_backwards_checks;
+	int	nr_find_usage_backwards_recursions;
 };
 
-#define LOCK_ACCESSED 1UL
-#define LOCK_ACCESSED_MASK (~LOCK_ACCESSED)
+DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-}
+#define __debug_atomic_inc(ptr)					\
+	this_cpu_inc(lockdep_stats.ptr);
 
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
+#define debug_atomic_inc(ptr)			{		\
+	WARN_ON_ONCE(!irqs_disabled());				\
+	__this_cpu_inc(lockdep_stats.ptr);			\
 }
 
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE) == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
-	return 0;
+#define debug_atomic_dec(ptr)			{		\
+	WARN_ON_ONCE(!irqs_disabled());				\
+	__this_cpu_dec(lockdep_stats.ptr);			\
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
-	return 0;
-}
-
-static inline int __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	lock->parent = (void *) parent + LOCK_ACCESSED;
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	return (unsigned long)lock->parent & LOCK_ACCESSED;
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return (struct lock_list *)
-		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
-}
-
-static inline unsigned long get_lock_depth(struct lock_list *child)
-{
-	unsigned long depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}
+#define debug_atomic_read(ptr)		({				\
+	struct lockdep_stats *__cpu_lockdep_stats;			\
+	unsigned long long __total = 0;					\
+	int __cpu;							\
+	for_each_possible_cpu(__cpu) {					\
+		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
+		__total += __cpu_lockdep_stats->ptr;			\
+	}								\
+	__total;							\
+})
+#else
+# define __debug_atomic_inc(ptr)	do { } while (0)
+# define debug_atomic_inc(ptr)		do { } while (0)
+# define debug_atomic_dec(ptr)		do { } while (0)
+# define debug_atomic_read(ptr)		0
+#endif
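
For context, the second hunk swaps the global atomic_t counters for a per-cpu struct lockdep_stats: debug_atomic_inc()/debug_atomic_dec() bump only the local CPU's copy (with IRQs expected to be disabled, so no atomic operation is needed), and debug_atomic_read() folds the per-cpu copies into one total with for_each_possible_cpu(). A minimal user-space sketch of that counting pattern follows; the names (stat_inc, stat_read_total, the fixed NR_CPUS array) are illustrative assumptions, not the kernel API.

/*
 * Illustrative user-space analogue of the per-cpu lockdep_stats pattern:
 * each "CPU" owns a private counter slot, increments are plain stores to
 * the local slot, and a read sums every slot. All names are hypothetical.
 */
#include <stdio.h>

#define NR_CPUS 4

struct stats {
	int chain_lookup_hits;			/* mirrors one lockdep_stats field */
};

static struct stats per_cpu_stats[NR_CPUS];	/* stand-in for DECLARE_PER_CPU */

/* stand-in for debug_atomic_inc(): no atomic op, only the local slot is touched */
static void stat_inc(int cpu)
{
	per_cpu_stats[cpu].chain_lookup_hits++;
}

/* stand-in for debug_atomic_read(): sum the counter over all possible "CPUs" */
static unsigned long long stat_read_total(void)
{
	unsigned long long total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += per_cpu_stats[cpu].chain_lookup_hits;
	return total;
}

int main(void)
{
	stat_inc(0);
	stat_inc(0);
	stat_inc(3);
	printf("chain_lookup_hits = %llu\n", stat_read_total());	/* prints 3 */
	return 0;
}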