sh: convert /proc/cpu/alignment, /proc/cpu/kernel_alignment to seq_file

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index da2e2b2..9ccf0e2 100644
@@ -20,51 +20,10 @@ struct lockdep_map;
 #include <linux/stacktrace.h>
 
 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-       LOCK_USED = 0,
-       LOCK_USED_IN_HARDIRQ,
-       LOCK_USED_IN_SOFTIRQ,
-       LOCK_USED_IN_RECLAIM_FS,
-       LOCK_ENABLED_SOFTIRQ,
-       LOCK_ENABLED_HARDIRQ,
-       LOCK_HELD_OVER_RECLAIM_FS,
-       LOCK_USED_IN_HARDIRQ_READ,
-       LOCK_USED_IN_SOFTIRQ_READ,
-       LOCK_USED_IN_RECLAIM_FS_READ,
-       LOCK_ENABLED_SOFTIRQ_READ,
-       LOCK_ENABLED_HARDIRQ_READ,
-       LOCK_HELD_OVER_RECLAIM_FS_READ,
-       LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED                     (1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ          (1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ          (1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_USED_IN_RECLAIM_FS       (1 << LOCK_USED_IN_RECLAIM_FS)
-#define LOCKF_ENABLED_HARDIRQ          (1 << LOCK_ENABLED_HARDIRQ)
-#define LOCKF_ENABLED_SOFTIRQ          (1 << LOCK_ENABLED_SOFTIRQ)
-#define LOCKF_HELD_OVER_RECLAIM_FS     (1 << LOCK_HELD_OVER_RECLAIM_FS)
-
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ     (1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ     (1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_USED_IN_RECLAIM_FS_READ  (1 << LOCK_USED_IN_RECLAIM_FS_READ)
-#define LOCKF_ENABLED_HARDIRQ_READ     (1 << LOCK_ENABLED_HARDIRQ_READ)
-#define LOCKF_ENABLED_SOFTIRQ_READ     (1 << LOCK_ENABLED_SOFTIRQ_READ)
-#define LOCKF_HELD_OVER_RECLAIM_FS_READ        (1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
-
-#define LOCKF_ENABLED_IRQ_READ \
-               (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-               (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES          (1+3*4)
 
 #define MAX_LOCKDEP_SUBCLASSES         8UL
 
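The magic constant replaces the open-coded enum deleted above: one bit for LOCK_USED plus four usage bits (used-in, used-in-read, enabled, enabled-read) for each of the three tracked states (hardirq, softirq, reclaim_fs), i.e. 1 + 3*4 = 13, which matches the 13 enumerators removed here. The usage_traces[] array in the next hunk is sized with this count. A sketch of the x-macro definition the new comment alludes to, assuming kernel/lockdep_states.h carries one LOCKDEP_STATE(STATE) line per state; the real enum lives under kernel/ and its exact ordering and naming may differ:

enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)                  \
        LOCK_USED_IN_##__STATE,                 \
        LOCK_USED_IN_##__STATE##_READ,          \
        LOCK_ENABLED_##__STATE,                 \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"     /* HARDIRQ, SOFTIRQ, RECLAIM_FS */
#undef LOCKDEP_STATE
        LOCK_USED,              /* 3 states * 4 bits + 1 = 13 */
        LOCK_USAGE_STATES       /* == XXX_LOCK_USAGE_STATES */
};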
@@ -105,7 +64,7 @@ struct lock_class {
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
-       struct stack_trace              usage_traces[LOCK_USAGE_STATES];
+       struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];
 
        /*
         * These fields represent a directed graph of lock dependencies,
@@ -190,6 +149,12 @@ struct lock_list {
        struct lock_class               *class;
        struct stack_trace              trace;
        int                             distance;
+
+       /*
+        * The parent field is used to implement breadth-first search, and the
+        * bit 0 is reused to indicate if the lock has been accessed in BFS.
+        */
+       struct lock_list                *parent;
 };
 
 /*
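Since struct lock_list entries are word-aligned, bit 0 of the new parent pointer is free to double as the "visited" marker the comment describes. A hypothetical illustration of the trick; the real BFS helpers live in kernel/lockdep.c and use their own names:

static inline void bfs_mark_accessed(struct lock_list *entry,
                                     struct lock_list *parent)
{
        /* Record the BFS parent and set bit 0 as the "visited" tag. */
        entry->parent = (struct lock_list *)((unsigned long)parent | 1UL);
}

static inline int bfs_accessed(const struct lock_list *entry)
{
        return (unsigned long)entry->parent & 1UL;
}

static inline struct lock_list *bfs_parent(const struct lock_list *entry)
{
        /* Mask the tag bit off to recover the real parent pointer. */
        return (struct lock_list *)((unsigned long)entry->parent & ~1UL);
}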
@@ -249,10 +214,12 @@ struct held_lock {
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
-       unsigned int trylock:1;
+       unsigned int trylock:1;                                         /* 16 bits */
+
        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:2;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
+       unsigned int references:11;                                     /* 32 bits */
 };
 
 /*
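The new bit-count annotations document how the flag bits pack: the fields up to and including trylock fill the first 16 bits, and read:2 + check:2 + hardirqs_off:1 + the new references:11 fill the second 16, so everything still fits in a single 32-bit word. A standalone sketch (not the kernel struct; the first 16 bits are collapsed into one placeholder field) that checks the packing on common ABIs:

#include <assert.h>
#include <stdint.h>

struct held_lock_flags {
        unsigned int upper:16;      /* everything up to and including trylock */
        unsigned int read:2;
        unsigned int check:2;
        unsigned int hardirqs_off:1;
        unsigned int references:11; /* 16 + 2 + 2 + 1 + 11 = 32 */
};

static_assert(sizeof(struct held_lock_flags) == sizeof(uint32_t),
              "flag bits must pack into a single 32-bit word");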
@@ -299,6 +266,16 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 #define lockdep_set_subclass(lock, sub)        \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)
+/*
+ * Compare locking classes
+ */
+#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
+
+static inline int lockdep_match_key(struct lockdep_map *lock,
+                                   struct lock_class_key *key)
+{
+       return lock->key == key;
+}
 
 /*
  * Acquire a lock.
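lockdep_match_key() simply compares the key currently attached to a lockdep_map, so lockdep_match_class() lets code verify that a lock carries the class annotation it expects. A hypothetical usage sketch for a lockdep-enabled build; the key and function names below are invented:

static struct lock_class_key my_subsys_key;

static void my_subsys_lock_init(spinlock_t *lock)
{
        spin_lock_init(lock);
        lockdep_set_class(lock, &my_subsys_key);

        /* Sanity check: the lock now belongs to my_subsys_key's class. */
        WARN_ON(!lockdep_match_class(lock, &my_subsys_key));
}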
@@ -322,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
 
+#define lockdep_is_held(lock)  lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);
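Unlike spin_is_locked() and friends, lock_is_held() answers whether the current context holds the lock, which makes it suitable for assertions about the caller. Note that this header provides it only under CONFIG_LOCKDEP, so open-coded checks need guarding (or should use lockdep_assert_held() below). A hypothetical sketch for a lockdep-enabled build, with invented names:

struct my_dev {
        struct mutex lock;
        /* ... */
};

static void my_dev_flush(struct my_dev *dev)
{
        /* This path takes dev->lock itself, so the caller must not hold it. */
        WARN_ON_ONCE(lockdep_is_held(&dev->lock));

        mutex_lock(&dev->lock);
        /* ... flush pending work ... */
        mutex_unlock(&dev->lock);
}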
@@ -340,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_depth(tsk)     (debug_locks ? (tsk)->lockdep_depth : 0)
 
+#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
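lockdep_assert_held() wraps the check above so helpers can document and enforce their locking contract; the !LOCKDEP stub added further down makes it compile away, so callers need no guarding. A hypothetical example of the intended use (names invented):

struct my_counter {
        spinlock_t lock;
        int value;
};

static void my_counter_add(struct my_counter *c, int delta)
{
        /* Callers must hold c->lock; warns if lockdep says we do not. */
        lockdep_assert_held(&c->lock);
        c->value += delta;
}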
@@ -367,6 +350,11 @@ static inline void lockdep_on(void)
 #define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
 #define lockdep_set_subclass(lock, sub)                do { } while (0)
+/*
+ * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
+ * case since the result is not well defined and the caller should rather
+ * #ifdef the call himself.
+ */
 
 # define INIT_LOCKDEP
 # define lockdep_reset()               do { debug_locks = 1; } while (0)
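Because the !LOCKDEP build provides no lockdep_match_class()/lockdep_match_key() at all, callers have to guard such checks themselves. A hypothetical pattern, reusing the invented key from the earlier sketch:

static void my_subsys_check_class(spinlock_t *lock)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(!lockdep_match_class(lock, &my_subsys_key));
#endif
}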
@@ -379,6 +367,8 @@ struct lock_class_key { };
 
 #define lockdep_depth(tsk)     (0)
 
+#define lockdep_assert_held(l)                 do { } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
@@ -405,6 +395,23 @@ do {                                                               \
 
 #endif /* CONFIG_LOCK_STAT */
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+       LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+       lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
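The new LOCK_CONTENDED_FLAGS() factors the CONFIG_LOCKDEP decision out of the lock wrappers: with lockdep the plain lock function is used (and contention is tracked via LOCK_CONTENDED()), without it the architecture's *_lock_flags() variant may re-enable interrupts while spinning. Roughly how the irqsave spinlock path consumes it, as a sketch based on kernel/spinlock.c of this era; details may differ:

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
                             _raw_spin_lock_flags, &flags);
        return flags;
}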