diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index a796f1f..b0f0118 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
@@ -39,6 +40,9 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -81,6 +85,8 @@ static int graph_lock(void)
                __raw_spin_unlock(&lockdep_lock);
                return 0;
        }
+       /* prevent any recursions within lockdep from causing deadlocks */
+       current->lockdep_recursion++;
        return 1;
 }
 
@@ -89,6 +95,7 @@ static inline int graph_unlock(void)
        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
+       current->lockdep_recursion--;
        __raw_spin_unlock(&lockdep_lock);
        return 0;
 }
@@ -120,19 +127,28 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+       if (!hlock->class_idx) {
+               DEBUG_LOCKS_WARN_ON(1);
+               return NULL;
+       }
+       return lock_classes + hlock->class_idx - 1;
+}
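
A note on the new accessor: held_lock now stores a 1-based index into lock_classes[] rather than a raw class pointer, so class_idx == 0 can double as "no class recorded", which is what the DEBUG_LOCKS_WARN_ON() above catches. A minimal stand-alone sketch of that convention (illustrative user-space C, not kernel code):

	#include <stdio.h>

	struct klass { const char *name; };
	static struct klass classes[8] = { { "A" }, { "B" } };

	struct held { unsigned int class_idx; };	/* 0 means "no class recorded" */

	static struct klass *held_class(struct held *h)
	{
		return h->class_idx ? classes + h->class_idx - 1 : NULL;
	}

	int main(void)
	{
		struct held h = { .class_idx = 2 };
		printf("%s\n", held_class(&h)->name);	/* prints "B": index 2 maps to classes[1] */
		return 0;
	}
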
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-               if (class->contention_point[i] == 0) {
-                       class->contention_point[i] = ip;
+       for (i = 0; i < LOCKSTAT_POINTS; i++) {
+               if (points[i] == 0) {
+                       points[i] = ip;
                        break;
                }
-               if (class->contention_point[i] == ip)
+               if (points[i] == ip)
                        break;
        }
 
@@ -172,6 +188,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
 
+               for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+                       stats.contending_point[i] += pcs->contending_point[i];
+
                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -196,6 +215,7 @@ void clear_lock_stats(struct lock_class *class)
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
+       memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -218,7 +238,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 
        holdtime = sched_clock() - hlock->holdtime_stamp;
 
-       stats = get_lock_stats(hlock->class);
+       stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
@@ -274,14 +294,12 @@ void lockdep_off(void)
 {
        current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
        current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -294,12 +312,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE       1
 # define SOFTIRQ_VERBOSE       1
+# define RECLAIM_VERBOSE       1
 #else
 # define HARDIRQ_VERBOSE       0
 # define SOFTIRQ_VERBOSE       0
+# define RECLAIM_VERBOSE       0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -368,6 +388,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+                                    unsigned int depth)
+{
+       if (!depth)
+               lockdep_dependency_gen_id++;
+       if (source->dep_gen_id == lockdep_dependency_gen_id)
+               return true;
+       source->dep_gen_id = lockdep_dependency_gen_id;
+       return false;
+}
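
lockdep_dependency_visit() implements generation-counter visited marking: every top-level traversal (depth == 0) bumps the global generation, and a class whose dep_gen_id already equals the current generation was reached earlier in the same traversal, so the walker can stop there, with no per-class flag to clear afterwards. This is what keeps the recursive walks (check_noncircular(), find_usage_*(), and the dependency counters added further down) from exploding on densely shared subgraphs. A stand-alone sketch of the idea (illustrative, not kernel code):

	#include <stdbool.h>

	struct node {
		unsigned int gen_id;		/* generation of the last walk that saw this node */
		struct node *next[4];
		int nr_next;
	};

	static unsigned int gen_id;

	static bool visited(struct node *n, unsigned int depth)
	{
		if (!depth)
			gen_id++;		/* new walk: nothing is marked yet */
		if (n->gen_id == gen_id)
			return true;		/* already reached during this walk */
		n->gen_id = gen_id;
		return false;
	}

	/* Counts every reachable node exactly once, even when the graph is not a tree. */
	static unsigned long count_nodes(struct node *n, unsigned int depth)
	{
		unsigned long ret = 1;
		int i;

		if (visited(n, depth))
			return 0;
		for (i = 0; i < n->nr_next; i++)
			ret += count_nodes(n->next[i], depth + 1);
		return ret;
	}
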
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -401,30 +434,24 @@ atomic_t nr_find_usage_forwards_checks;
 atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
 atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)         atomic_inc(ptr)
-# define debug_atomic_dec(ptr)         atomic_dec(ptr)
-# define debug_atomic_read(ptr)                atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)         do { } while (0)
-# define debug_atomic_dec(ptr)         do { } while (0)
-# define debug_atomic_read(ptr)                0
 #endif
 
 /*
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)                                               \
+       [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
+       [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
+       [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+       [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-       [LOCK_USED] =                   "initial-use ",
-       [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
-       [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
-       [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
-       [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
-       [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
-       [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
-       [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
-       [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+       [LOCK_USED] = "INITIAL USE",
 };
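
The usage strings are now generated with an X-macro: lockdep_states.h is included wherever a per-state table is needed, each time with a different LOCKDEP_STATE() definition, so adding a state to that single header grows usage_str[], the state-name tables, the verbose filters and the mark_lock() switch below in lockstep. Judging from the states this diff actually uses (HARDIRQ, SOFTIRQ, RECLAIM_FS), the header presumably amounts to:

	/* lockdep_states.h, as inferred from this diff: one line per tracked state */
	LOCKDEP_STATE(HARDIRQ)
	LOCKDEP_STATE(SOFTIRQ)
	LOCKDEP_STATE(RECLAIM_FS)

With the __USAGE() helper above, usage_str[] then expands to entries such as [LOCK_USED_IN_HARDIRQ] = "IN-HARDIRQ-W" and [LOCK_ENABLED_RECLAIM_FS_READ] = "RECLAIM_FS-ON-R".
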
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -432,46 +459,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-       *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-               *c1 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-                       *c1 = '-';
+       return 1UL << bit;
+}
 
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-               *c2 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-                       *c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+       char c = '.';
 
-       if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-               *c3 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-               *c3 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-                       *c3 = '?';
+       if (class->usage_mask & lock_flag(bit + 2))
+               c = '+';
+       if (class->usage_mask & lock_flag(bit)) {
+               c = '-';
+               if (class->usage_mask & lock_flag(bit + 2))
+                       c = '?';
        }
 
-       if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-               *c4 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-               *c4 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-                       *c4 = '?';
-       }
+       return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+       int i = 0;
+
+#define LOCKDEP_STATE(__STATE)                                                 \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+       usage[i] = '\0';
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-       char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+       char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
        const char *name;
 
-       get_usage_chars(class, &c1, &c2, &c3, &c4);
+       get_usage_chars(class, usage);
 
        name = class->name;
        if (!name) {
@@ -484,7 +510,7 @@ static void print_lock_name(struct lock_class *class)
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
-       printk("){%c%c%c%c}", c1, c2, c3, c4);
+       printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -501,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-       print_lock_name(hlock->class);
+       print_lock_name(hlock_class(hlock));
        printk(", at: ");
        print_ip_sym(hlock->acquire_ip);
 }
@@ -550,10 +576,14 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
        struct lock_list *entry;
 
+       if (lockdep_dependency_visit(class, depth))
+               return;
+
        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
                return;
 
@@ -763,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
@@ -826,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
 
                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        return list_entries + nr_list_entries++;
@@ -846,11 +878,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
        if (!entry)
                return 0;
 
-       entry->class = this;
-       entry->distance = distance;
        if (!save_trace(&entry->trace))
                return 0;
 
+       entry->class = this;
+       entry->distance = distance;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
@@ -928,7 +960,7 @@ static noinline int print_circular_bug_tail(void)
        if (debug_locks_silent)
                return 0;
 
-       this.class = check_source->class;
+       this.class = hlock_class(check_source);
        if (!save_trace(&this.trace))
                return 0;
 
@@ -955,6 +987,67 @@ static int noinline print_infinite_recursion_bug(void)
        return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+                                          unsigned int depth)
+{
+       struct lock_list *entry;
+       unsigned long ret = 1;
+
+       if (lockdep_dependency_visit(class, depth))
+               return 0;
+
+       /*
+        * Recurse this class's dependency list:
+        */
+       list_for_each_entry(entry, &class->locks_after, entry)
+               ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+       return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+       unsigned long ret, flags;
+
+       local_irq_save(flags);
+       __raw_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_forward_deps(class, 0);
+       __raw_spin_unlock(&lockdep_lock);
+       local_irq_restore(flags);
+
+       return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+                                           unsigned int depth)
+{
+       struct lock_list *entry;
+       unsigned long ret = 1;
+
+       if (lockdep_dependency_visit(class, depth))
+               return 0;
+       /*
+        * Recurse this class's dependency list:
+        */
+       list_for_each_entry(entry, &class->locks_before, entry)
+               ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+       return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+       unsigned long ret, flags;
+
+       local_irq_save(flags);
+       __raw_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_backward_deps(class, 0);
+       __raw_spin_unlock(&lockdep_lock);
+       local_irq_restore(flags);
+
+       return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -964,6 +1057,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
        struct lock_list *entry;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        debug_atomic_inc(&nr_cyclic_check_recursions);
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
@@ -973,7 +1069,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
         * Check this lock's dependency list:
         */
        list_for_each_entry(entry, &source->locks_after, entry) {
-               if (entry->class == check_target->class)
+               if (entry->class == hlock_class(check_target))
                        return print_circular_bug_header(entry, depth+1);
                debug_atomic_inc(&nr_cyclic_checks);
                if (!check_noncircular(entry->class, depth+1))
@@ -982,7 +1078,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1007,6 +1103,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
        struct lock_list *entry;
        int ret;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
        if (depth >= RECURSION_LIMIT)
@@ -1046,6 +1145,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
        struct lock_list *entry;
        int ret;
 
+       if (lockdep_dependency_visit(source, depth))
+               return 1;
+
        if (!__raw_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
@@ -1060,6 +1162,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
                return 2;
        }
 
+       if (!source && debug_locks_off_graph_unlock()) {
+               WARN_ON(1);
+               return 0;
+       }
+
        /*
         * Check this lock's dependency list:
         */
@@ -1099,9 +1206,9 @@ print_bad_irq_dependency(struct task_struct *curr,
        printk("\nand this task is already holding:\n");
        print_lock(prev);
        printk("which would create a new lock dependency:\n");
-       print_lock_name(prev->class);
+       print_lock_name(hlock_class(prev));
        printk(" ->");
-       print_lock_name(next->class);
+       print_lock_name(hlock_class(next));
        printk("\n");
 
        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1142,12 +1249,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
        find_usage_bit = bit_backwards;
        /* fills in <backwards_match> */
-       ret = find_usage_backwards(prev->class, 0);
+       ret = find_usage_backwards(hlock_class(prev), 0);
        if (!ret || ret == 1)
                return ret;
 
        find_usage_bit = bit_forwards;
-       ret = find_usage_forwards(next->class, 0);
+       ret = find_usage_forwards(hlock_class(next), 0);
        if (!ret || ret == 1)
                return ret;
        /* ret == 2 */
@@ -1155,9 +1262,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
                        bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-               struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+       return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+       /*
+        * USED_IN
+        * USED_IN_READ
+        * ENABLED
+        * ENABLED_READ
+        *
+        * bit 0 - write/read
+        * bit 1 - used_in/enabled
+        * bit 2+  state
+        */
+
+       int state = new_bit & ~3;
+       int dir = new_bit & 2;
+
+       /*
+        * keep state, bit flip the direction and strip read.
+        */
+       return state | (dir ^ 2);
+}
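
exclusive_bit() depends on the usage bits being laid out exactly as the comment says: bit 0 selects read, bit 1 selects used_in (0) versus enabled (1), and the remaining bits select the state. Keeping the state bits, flipping bit 1 and dropping bit 0 therefore maps any usage bit onto the write-mode bit of the opposite direction, which is precisely the conflicting usage. Illustrative values for one state, assuming the USED_IN, USED_IN_READ, ENABLED, ENABLED_READ ordering the comment describes:

	enum {					/* illustrative expansion for a single state */
		LOCK_USED_IN_HARDIRQ		= 0,	/* state 0, used_in, write */
		LOCK_USED_IN_HARDIRQ_READ	= 1,	/* state 0, used_in, read  */
		LOCK_ENABLED_HARDIRQ		= 2,	/* state 0, enabled, write */
		LOCK_ENABLED_HARDIRQ_READ	= 3,	/* state 0, enabled, read  */
	};

	/*
	 * exclusive_bit(LOCK_USED_IN_HARDIRQ_READ):
	 *	state = 1 & ~3 = 0,  dir = 1 & 2 = 0,  result = 0 | (0 ^ 2) = 2
	 * i.e. LOCK_ENABLED_HARDIRQ: "used in hardirq context, even as a reader"
	 * conflicts with "held while hardirqs are enabled".
	 */
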
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+                          struct held_lock *next, enum lock_usage_bit bit)
 {
        /*
         * Prove that the new dependency does not connect a hardirq-safe
@@ -1165,38 +1312,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-                                       LOCK_ENABLED_HARDIRQS, "hard"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
+       bit++; /* _READ */
+
        /*
         * Prove that the new dependency does not connect a hardirq-safe-read
         * lock with a hardirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-                                       LOCK_ENABLED_HARDIRQS, "hard-read"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
-       /*
-        * Prove that the new dependency does not connect a softirq-safe
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
-               return 0;
-       /*
-        * Prove that the new dependency does not connect a softirq-safe-read
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
+       return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+               struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)                                         \
+       if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
                return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
        return 1;
 }
@@ -1268,18 +1411,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
               struct lockdep_map *next_instance, int read)
 {
        struct held_lock *prev;
+       struct held_lock *nest = NULL;
        int i;
 
        for (i = 0; i < curr->lockdep_depth; i++) {
                prev = curr->held_locks + i;
-               if (prev->class != next->class)
+
+               if (prev->instance == next->nest_lock)
+                       nest = prev;
+
+               if (hlock_class(prev) != hlock_class(next))
                        continue;
+
                /*
                 * Allow read-after-read recursion of the same
                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
                 */
                if ((read == 2) && prev->read)
                        return 2;
+
+               /*
+                * We're holding the nest_lock, which serializes this lock's
+                * nesting behaviour.
+                */
+               if (nest)
+                       return 2;
+
                return print_deadlock_bug(curr, prev, next);
        }
        return 1;
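
The new nest_lock argument relaxes the same-class deadlock check: if the task also holds the lock that was passed as nest_lock for this acquisition, nesting several locks of one class is taken to be serialized by that outer lock and is allowed. A hedged usage sketch, assuming the spin_lock_nest_lock() helper that forwards its second argument as the nest_lock (the struct and list here are made up for illustration):

	struct obj {
		spinlock_t lock;		/* all instances share one lock class */
		struct list_head node;
	};

	static DEFINE_SPINLOCK(outer);		/* serializes every such nesting */
	static LIST_HEAD(objects);

	static void lock_all_objects(void)
	{
		struct obj *o;

		spin_lock(&outer);
		list_for_each_entry(o, &objects, node)
			/* same class each iteration; accepted because &outer is
			 * held and recorded as the nest_lock of each acquisition */
			spin_lock_nest_lock(&o->lock, &outer);
	}
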
@@ -1325,7 +1482,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         */
        check_source = next;
        check_target = prev;
-       if (!(check_noncircular(next->class, 0)))
+       if (!(check_noncircular(hlock_class(next), 0)))
                return print_circular_bug_tail();
 
        if (!check_prev_add_irq(curr, prev, next))
@@ -1349,8 +1506,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         *  chains - the second one will be new, but L1 already has
         *  L2 added to its dependency list, due to the first chain.)
         */
-       list_for_each_entry(entry, &prev->class->locks_after, entry) {
-               if (entry->class == next->class) {
+       list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+               if (entry->class == hlock_class(next)) {
                        if (distance == 1)
                                entry->distance = 1;
                        return 2;
@@ -1361,26 +1518,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * Ok, all validations passed, add the new lock
         * to the previous lock's dependency list:
         */
-       ret = add_lock_to_list(prev->class, next->class,
-                              &prev->class->locks_after, next->acquire_ip, distance);
+       ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+                              &hlock_class(prev)->locks_after,
+                              next->acquire_ip, distance);
 
        if (!ret)
                return 0;
 
-       ret = add_lock_to_list(next->class, prev->class,
-                              &next->class->locks_before, next->acquire_ip, distance);
+       ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+                              &hlock_class(next)->locks_before,
+                              next->acquire_ip, distance);
        if (!ret)
                return 0;
 
        /*
         * Debugging printouts:
         */
-       if (verbose(prev->class) || verbose(next->class)) {
+       if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
                graph_unlock();
                printk("\n new dependency: ");
-               print_lock_name(prev->class);
+               print_lock_name(hlock_class(prev));
                printk(" => ");
-               print_lock_name(next->class);
+               print_lock_name(hlock_class(next));
                printk("\n");
                dump_stack();
                return graph_lock();
@@ -1459,7 +1618,7 @@ out_bug:
 
 unsigned long nr_lock_chains;
 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
-atomic_t nr_chain_hlocks;
+int nr_chain_hlocks;
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 
 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
@@ -1477,11 +1636,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
                                     struct held_lock *hlock,
                                     u64 chain_key)
 {
-       struct lock_class *class = hlock->class;
+       struct lock_class *class = hlock_class(hlock);
        struct list_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;
        struct held_lock *hlock_curr, *hlock_next;
-       int i, j, n;
+       int i, j, n, cn;
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
@@ -1525,6 +1684,7 @@ cache_hit:
 
                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
        chain = lock_chains + nr_lock_chains++;
@@ -1540,11 +1700,17 @@ cache_hit:
        }
        i++;
        chain->depth = curr->lockdep_depth + 1 - i;
-       n = atomic_add_return(chain->depth, &nr_chain_hlocks);
-       if (unlikely(n < MAX_LOCKDEP_CHAIN_HLOCKS)) {
-               chain->base = n - chain->depth;
+       cn = nr_chain_hlocks;
+       while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+               n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+               if (n == cn)
+                       break;
+               cn = n;
+       }
+       if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+               chain->base = cn;
                for (j = 0; j < chain->depth - 1; j++, i++) {
-                       int lock_id = curr->held_locks[i].class - lock_classes;
+                       int lock_id = curr->held_locks[i].class_idx - 1;
                        chain_hlocks[chain->base + j] = lock_id;
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
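
The chain_hlocks[] reservation also changes shape: nr_chain_hlocks was an atomic_t bumped with atomic_add_return(), so an over-limit reservation still advanced the counter past MAX_LOCKDEP_CHAIN_HLOCKS for good. The open-coded cmpxchg() loop above only commits a reservation while it still fits, and leaves the counter untouched once the pool is exhausted. The same pattern outside the kernel (user-space sketch using a GCC builtin in place of cmpxchg()):

	static int pool_used;			/* how many slots have been handed out */

	static int reserve(int want, int pool_size)
	{
		int cur = pool_used;

		while (cur + want <= pool_size) {
			int old = __sync_val_compare_and_swap(&pool_used, cur, cur + want);
			if (old == cur)
				return cur;	/* base index of our reservation */
			cur = old;		/* lost a race, retry from the new value */
		}
		return -1;			/* pool exhausted; nothing was consumed */
	}
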
@@ -1633,14 +1799,13 @@ static void check_chain_key(struct task_struct *curr)
                hlock = curr->held_locks + i;
                if (chain_key != hlock->prev_chain_key) {
                        debug_locks_off();
-                       printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+                       WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
                                curr->lockdep_depth, i,
                                (unsigned long long)chain_key,
                                (unsigned long long)hlock->prev_chain_key);
-                       WARN_ON(1);
                        return;
                }
-               id = hlock->class - lock_classes;
+               id = hlock->class_idx - 1;
                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                        return;
 
@@ -1652,11 +1817,10 @@ static void check_chain_key(struct task_struct *curr)
        }
        if (chain_key != curr->curr_chain_key) {
                debug_locks_off();
-               printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+               WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
                        curr->lockdep_depth, i,
                        (unsigned long long)chain_key,
                        (unsigned long long)curr->curr_chain_key);
-               WARN_ON(1);
        }
 #endif
 }
@@ -1685,7 +1849,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
        print_lock(this);
 
        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-       print_stack_trace(this->class->usage_traces + prev_bit, 1);
+       print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
        print_irqtrace_events(curr);
        printk("\nother info that might help us debug this:\n");
@@ -1704,7 +1868,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-       if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+       if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
                return print_usage_bug(curr, this, bad_bit, new_bit);
        return 1;
 }
@@ -1712,7 +1876,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                     enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -1733,9 +1897,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
                curr->comm, task_pid_nr(curr));
        print_lock(this);
        if (forwards)
-               printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+               printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
        else
-               printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+               printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
        print_lock_name(other);
        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -1743,7 +1907,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
        lockdep_print_held_locks(curr);
 
        printk("\nthe first lock's dependencies:\n");
-       print_lock_dependencies(this->class, 0);
+       print_lock_dependencies(hlock_class(this), 0);
 
        printk("\nthe second lock's dependencies:\n");
        print_lock_dependencies(other, 0);
@@ -1766,7 +1930,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
        find_usage_bit = bit;
        /* fills in <forwards_match> */
-       ret = find_usage_forwards(this->class, 0);
+       ret = find_usage_forwards(hlock_class(this), 0);
        if (!ret || ret == 1)
                return ret;
 
@@ -1785,7 +1949,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
        find_usage_bit = bit;
        /* fills in <backwards_match> */
-       ret = find_usage_backwards(this->class, 0);
+       ret = find_usage_backwards(hlock_class(this), 0);
        if (!ret || ret == 1)
                return ret;
 
@@ -1805,7 +1969,7 @@ void print_irqtrace_events(struct task_struct *curr)
        print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
        return class_filter(class);
@@ -1813,7 +1977,7 @@ static int hardirq_verbose(struct lock_class *class)
        return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
        return class_filter(class);
@@ -1821,185 +1985,95 @@ static int softirq_verbose(struct lock_class *class)
        return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+       return class_filter(class);
+#endif
+       return 0;
+}
+
 #define STRICT_READ_CHECKS     1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+       __STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+                               struct lock_class *class)
+{
+       return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+                            enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit)
 {
-       int ret = 1;
+       int excl_bit = exclusive_bit(new_bit);
+       int read = new_bit & 1;
+       int dir = new_bit & 2;
 
-       switch(new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_HARDIRQS_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_SOFTIRQS_READ))
-                       return 0;
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_HARDIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               /*
-                * just marked it hardirq-read-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-               if (hardirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               /*
-                * just marked it softirq-read-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-               if (softirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_HARDIRQ_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-unsafe, check that no hardirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-unsafe, check that no
-                * hardirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_SOFTIRQ_READ))
-                       return 0;
-               /*
-                * just marked it softirq-unsafe, check that no softirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-unsafe, check that no
-                * softirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-read-unsafe, check that no
-                * hardirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#endif
-               if (hardirq_verbose(this->class))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
+       /*
+        * mark USED_IN has to look forwards -- to ensure no dependency
+        * has ENABLED state, which would allow recursion deadlocks.
+        *
+        * mark ENABLED has to look backwards -- to ensure no dependee
+        * has USED_IN state, which, again, would allow recursion deadlocks.

+        */
+       check_usage_f usage = dir ?
+               check_usage_backwards : check_usage_forwards;
+
+       /*
+        * Validate that this particular lock does not have conflicting
+        * usage states.
+        */
+       if (!valid_state(curr, this, new_bit, excl_bit))
+               return 0;
+
+       /*
+        * Validate that the lock dependencies don't have conflicting usage
+        * states.
+        */
+       if ((!read || !dir || STRICT_READ_CHECKS) &&
+                       !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+               return 0;
+
+       /*
+        * Check for read in write conflicts
+        */
+       if (!read) {
+               if (!valid_state(curr, this, new_bit, excl_bit + 1))
                        return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-read-unsafe, check that no
-                * softirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
+
+               if (STRICT_READ_CHECKS &&
+                       !usage(curr, this, excl_bit + 1,
+                               state_name(new_bit + 1)))
                        return 0;
-#endif
-               if (softirq_verbose(this->class))
-                       ret = 2;
-               break;
-       default:
-               WARN_ON(1);
-               break;
        }
 
-       return ret;
+       if (state_verbose(new_bit, hlock_class(this)))
+               return 2;
+
+       return 1;
 }
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE) __STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
        enum lock_usage_bit usage_bit;
        struct held_lock *hlock;
@@ -2008,17 +2082,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
-               if (hardirq) {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_HARDIRQS;
-               } else {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_SOFTIRQS;
-               }
+               usage_bit = 2 + (mark << 2); /* ENABLED */
+               if (hlock->read)
+                       usage_bit += 1; /* READ */
+
+               BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
                if (!mark_lock(curr, hlock, usage_bit))
                        return 0;
        }
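
mark_held_locks() now derives the usage bit arithmetically from the same layout (read in bit 0, the ENABLED direction in bit 1, the state in the bits above) instead of open-coding one branch per state. Assuming the HARDIRQ, SOFTIRQ, RECLAIM_FS ordering sketched earlier, for example:

	/*
	 * mark = SOFTIRQ (state 1), write-held lock:
	 *	usage_bit = 2 + (1 << 2)     = 6  ->  LOCK_ENABLED_SOFTIRQ
	 * mark = SOFTIRQ (state 1), read-held lock:
	 *	usage_bit = 2 + (1 << 2) + 1 = 7  ->  LOCK_ENABLED_SOFTIRQ_READ
	 */
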
@@ -2045,10 +2114,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
-       unsigned long ip;
+
+       time_hardirqs_on(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2062,7 +2132,6 @@ void trace_hardirqs_on(void)
        }
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
-       ip = (unsigned long) __builtin_return_address(0);
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
@@ -2072,7 +2141,7 @@ void trace_hardirqs_on(void)
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         */
-       if (!mark_held_locks(curr, 1))
+       if (!mark_held_locks(curr, HARDIRQ))
                return;
        /*
         * If we have softirqs enabled, then set the usage
@@ -2080,23 +2149,30 @@ void trace_hardirqs_on(void)
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, 0))
+               if (!mark_held_locks(curr, SOFTIRQ))
                        return;
 
        curr->hardirq_enable_ip = ip;
        curr->hardirq_enable_event = ++curr->irq_events;
        debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void trace_hardirqs_on(void)
+{
+       trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
 
+       time_hardirqs_off(CALLER_ADDR0, ip);
+
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
@@ -2108,13 +2184,18 @@ void trace_hardirqs_off(void)
                 * We have done an ON -> OFF transition:
                 */
                curr->hardirqs_enabled = 0;
-               curr->hardirq_disable_ip = _RET_IP_;
+               curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(&hardirqs_off_events);
        } else
                debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void trace_hardirqs_off(void)
+{
+       trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
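
Splitting the irq-trace hooks into *_caller() variants lets a caller that already knows the real enable/disable site pass that address in, rather than having every event attributed to the wrapper; trace_hardirqs_on()/off() simply forward CALLER_ADDR0, and the ip also feeds the new time_hardirqs_on()/off() hooks used by the irqsoff tracer. A hedged sketch of a caller that credits the event to its own call site (illustrative helper, not an existing kernel function):

	static __always_inline void irq_enable_traced(void)
	{
		trace_hardirqs_on_caller(_RET_IP_);	/* report our caller as the enable site */
		raw_local_irq_enable();
	}
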
 
 /*
@@ -2148,7 +2229,7 @@ void trace_softirqs_on(unsigned long ip)
         * enabled too:
         */
        if (curr->hardirqs_enabled)
-               mark_held_locks(curr, 0);
+               mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2177,6 +2258,48 @@ void trace_softirqs_off(unsigned long ip)
                debug_atomic_inc(&redundant_softirqs_off);
 }
 
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks))
+               return;
+
+       /* no reclaim without waiting on it */
+       if (!(gfp_mask & __GFP_WAIT))
+               return;
+
+       /* this guy won't enter reclaim */
+       if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+               return;
+
+       /* We're only interested in __GFP_FS allocations for now */
+       if (!(gfp_mask & __GFP_FS))
+               return;
+
+       if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
+               return;
+
+       mark_held_locks(curr, RECLAIM_FS);
+}
+
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+       current->lockdep_recursion = 1;
+       __lockdep_trace_alloc(gfp_mask, flags);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
        /*
@@ -2205,19 +2328,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
        if (!hlock->hardirqs_off) {
                if (hlock->read) {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS_READ))
+                                       LOCK_ENABLED_HARDIRQ_READ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS_READ))
+                                               LOCK_ENABLED_SOFTIRQ_READ))
                                        return 0;
                } else {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS))
+                                       LOCK_ENABLED_HARDIRQ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS))
+                                               LOCK_ENABLED_SOFTIRQ))
+                                       return 0;
+               }
+       }
+
+       /*
+        * We reuse the irq context infrastructure more broadly as a general
+        * context checking code. This tests GFP_FS recursion (a lock taken
+        * during reclaim for a GFP_FS allocation is held over a GFP_FS
+        * allocation).
+        */
+       if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+               if (hlock->read) {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+                                       return 0;
+               } else {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
                                        return 0;
                }
        }
@@ -2272,13 +2411,17 @@ static inline int separate_irq_context(struct task_struct *curr,
        return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
-                    enum lock_usage_bit new_bit)
+                            enum lock_usage_bit new_bit)
 {
        unsigned int new_mask = 1 << new_bit, ret = 1;
 
@@ -2286,7 +2429,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
         * If already set then do not dirty the cacheline,
         * nor do any checks:
         */
-       if (likely(this->class->usage_mask & new_mask))
+       if (likely(hlock_class(this)->usage_mask & new_mask))
                return 1;
 
        if (!graph_lock())
@@ -2294,25 +2437,24 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        /*
         * Make sure we didnt race:
         */
-       if (unlikely(this->class->usage_mask & new_mask)) {
+       if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
                graph_unlock();
                return 1;
        }
 
-       this->class->usage_mask |= new_mask;
+       hlock_class(this)->usage_mask |= new_mask;
 
-       if (!save_trace(this->class->usage_traces + new_bit))
+       if (!save_trace(hlock_class(this)->usage_traces + new_bit))
                return 0;
 
        switch (new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-       case LOCK_USED_IN_SOFTIRQ:
-       case LOCK_USED_IN_HARDIRQ_READ:
-       case LOCK_USED_IN_SOFTIRQ_READ:
-       case LOCK_ENABLED_HARDIRQS:
-       case LOCK_ENABLED_SOFTIRQS:
-       case LOCK_ENABLED_HARDIRQS_READ:
-       case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)                 \
+       case LOCK_USED_IN_##__STATE:            \
+       case LOCK_USED_IN_##__STATE##_READ:     \
+       case LOCK_ENABLED_##__STATE:            \
+       case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
                ret = mark_lock_irq(curr, this, new_bit);
                if (!ret)
                        return 0;
@@ -2372,7 +2514,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        if (subclass)
                register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2381,7 +2522,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
-                         unsigned long ip)
+                         struct lockdep_map *nest_lock, unsigned long ip)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -2403,6 +2544,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
 
@@ -2435,14 +2577,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                return 0;
 
        hlock = curr->held_locks + depth;
-
-       hlock->class = class;
+       if (DEBUG_LOCKS_WARN_ON(!class))
+               return 0;
+       hlock->class_idx = class - lock_classes + 1;
        hlock->acquire_ip = ip;
        hlock->instance = lock;
+       hlock->nest_lock = nest_lock;
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
-       hlock->hardirqs_off = hardirqs_off;
+       hlock->hardirqs_off = !!hardirqs_off;
 #ifdef CONFIG_LOCK_STAT
        hlock->waittime_stamp = 0;
        hlock->holdtime_stamp = sched_clock();
@@ -2497,6 +2641,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                debug_locks_off();
                printk("BUG: MAX_LOCK_DEPTH too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
 
@@ -2550,6 +2695,57 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
        return 1;
 }
 
+static int
+__lock_set_class(struct lockdep_map *lock, const char *name,
+                struct lock_class_key *key, unsigned int subclass,
+                unsigned long ip)
+{
+       struct task_struct *curr = current;
+       struct held_lock *hlock, *prev_hlock;
+       struct lock_class *class;
+       unsigned int depth;
+       int i;
+
+       depth = curr->lockdep_depth;
+       if (DEBUG_LOCKS_WARN_ON(!depth))
+               return 0;
+
+       prev_hlock = NULL;
+       for (i = depth-1; i >= 0; i--) {
+               hlock = curr->held_locks + i;
+               /*
+                * We must not cross into another context:
+                */
+               if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                       break;
+               if (hlock->instance == lock)
+                       goto found_it;
+               prev_hlock = hlock;
+       }
+       return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+       lockdep_init_map(lock, name, key, 0);
+       class = register_lock_class(lock, subclass, 0);
+       hlock->class_idx = class - lock_classes + 1;
+
+       curr->lockdep_depth = i;
+       curr->curr_chain_key = hlock->prev_chain_key;
+
+       for (; i < depth; i++) {
+               hlock = curr->held_locks + i;
+               if (!__lock_acquire(hlock->instance,
+                       hlock_class(hlock)->subclass, hlock->trylock,
+                               hlock->read, hlock->check, hlock->hardirqs_off,
+                               hlock->nest_lock, hlock->acquire_ip))
+                       return 0;
+       }
+
+       if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+               return 0;
+       return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2600,9 +2796,9 @@ found_it:
        for (i++; i < depth; i++) {
                hlock = curr->held_locks + i;
                if (!__lock_acquire(hlock->instance,
-                       hlock->class->subclass, hlock->trylock,
+                       hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
-                               hlock->acquire_ip))
+                               hlock->nest_lock, hlock->acquire_ip))
                        return 0;
        }
 
@@ -2645,7 +2841,7 @@ static int lock_release_nested(struct task_struct *curr,
 
 #ifdef CONFIG_DEBUG_LOCKDEP
        hlock->prev_chain_key = 0;
-       hlock->class = NULL;
+       hlock->class_idx = 0;
        hlock->acquire_ip = 0;
        hlock->irq_context = 0;
 #endif
@@ -2682,7 +2878,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+    defined(CONFIG_TRACE_IRQFLAGS)
        if (!debug_locks)
                return;
 
@@ -2713,17 +2910,38 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void lock_set_class(struct lockdep_map *lock, const char *name,
+                   struct lock_class_key *key, unsigned int subclass,
+                   unsigned long ip)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       current->lockdep_recursion = 1;
+       check_flags(flags);
+       if (__lock_set_class(lock, name, key, subclass, ip))
+               check_chain_key(current);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_set_class);
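
lock_set_class() re-registers a lock that is currently held under a different class, name and key without dropping it: __lock_set_class() pops the held-lock stack down to the target entry, swaps in the new class, then replays the later acquisitions through __lock_acquire() so the chain keys stay consistent. A hedged sketch of a call site (the object type, class name and key are hypothetical):

	struct my_obj {
		spinlock_t lock;
	};

	static struct lock_class_key my_obj_promoted_key;

	static void promote_obj(struct my_obj *obj)
	{
		spin_lock(&obj->lock);
		/* The object changes role while locked; reclassify the held lock so
		 * dependencies taken from here on are tracked under the new class. */
		lock_set_class(&obj->lock.dep_map, "my_obj-promoted",
			       &my_obj_promoted_key, 0, _THIS_IP_);
		/* ... role-specific work ... */
		spin_unlock(&obj->lock);
	}
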
+
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                 int trylock, int read, int check, unsigned long ip)
+                         int trylock, int read, int check,
+                         struct lockdep_map *nest_lock, unsigned long ip)
 {
        unsigned long flags;
 
-       if (unlikely(!lock_stat && !prove_locking))
-               return;
+       trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 
        if (unlikely(current->lockdep_recursion))
                return;
@@ -2733,19 +2951,20 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        current->lockdep_recursion = 1;
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), ip);
+                      irqs_disabled_flags(flags), nest_lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+DEFINE_TRACE(lock_release);
+
+void lock_release(struct lockdep_map *lock, int nested,
+                         unsigned long ip)
 {
        unsigned long flags;
 
-       if (unlikely(!lock_stat && !prove_locking))
-               return;
+       trace_lock_release(lock, nested, ip);
 
        if (unlikely(current->lockdep_recursion))
                return;
@@ -2757,9 +2976,18 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+       current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+       current->lockdep_reclaim_gfp = 0;
+}
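
These two helpers let the reclaim paths tell lockdep "locks taken from here are taken from reclaim context", while lockdep_trace_alloc() (earlier in this diff) marks locks held across __GFP_FS allocations; together they report a "fs lock held over GFP_FS allocation" inversion without an actual low-memory reclaim having to occur. A hedged sketch of how a reclaim path is expected to bracket itself (the real call sites would live in mm/, around direct reclaim and kswapd; shrink_everything() is hypothetical):

	static unsigned long reclaim_pages(gfp_t gfp_mask)
	{
		unsigned long freed;

		lockdep_set_current_reclaim_state(gfp_mask);	/* entering reclaim */
		freed = shrink_everything(gfp_mask);		/* hypothetical shrinker pass */
		lockdep_clear_current_reclaim_state();

		return freed;
	}
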
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
@@ -2795,7 +3023,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       int i, point;
+       int i, contention_point, contending_point;
 
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -2819,18 +3047,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
        hlock->waittime_stamp = sched_clock();
 
-       point = lock_contention_point(hlock->class, ip);
+       contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+       contending_point = lock_point(hlock_class(hlock)->contending_point,
+                                     lock->ip);
 
-       stats = get_lock_stats(hlock->class);
-       if (point < ARRAY_SIZE(stats->contention_point))
-               stats->contention_point[i]++;
+       stats = get_lock_stats(hlock_class(hlock));
+       if (contention_point < LOCKSTAT_POINTS)
+               stats->contention_point[contention_point]++;
+       if (contending_point < LOCKSTAT_POINTS)
+               stats->contending_point[contending_point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
 }
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
@@ -2867,7 +3099,7 @@ found_it:
                hlock->holdtime_stamp = now;
        }
 
-       stats = get_lock_stats(hlock->class);
+       stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
                if (hlock->read)
                        lock_time_inc(&stats->read_waittime, waittime);
@@ -2879,12 +3111,17 @@ found_it:
        put_lock_stats(stats);
 
        lock->cpu = cpu;
+       lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_contended(lock, ip);
+
        if (unlikely(!lock_stat))
                return;
 
@@ -2900,10 +3137,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+DEFINE_TRACE(lock_acquired);
+
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_acquired(lock, ip);
+
        if (unlikely(!lock_stat))
                return;
 
@@ -2913,7 +3154,7 @@ void lock_acquired(struct lockdep_map *lock)
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
-       __lock_acquired(lock);
+       __lock_acquired(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -2962,6 +3203,7 @@ static void zap_class(struct lock_class *class)
        list_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
+       class->key = NULL;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
@@ -3071,10 +3313,10 @@ void __init lockdep_info(void)
 {
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
-       printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES);
+       printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
-       printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE);
+       printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
@@ -3210,9 +3452,10 @@ retry:
                }
                printk(" ignoring it.\n");
                unlock = 0;
+       } else {
+               if (count != 10)
+                       printk(KERN_CONT " locked it.\n");
        }
-       if (count != 10)
-               printk(" locked it.\n");
 
        do_each_thread(g, p) {
                /*
@@ -3235,7 +3478,6 @@ retry:
        if (unlock)
                read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3256,7 +3498,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
                __debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)