diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 77fa776..06e1571 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -875,11 +875,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
        if (!entry)
                return 0;
 
-       entry->class = this;
-       entry->distance = distance;
        if (!save_trace(&entry->trace))
                return 0;
 
+       entry->class = this;
+       entry->distance = distance;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
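
Note on the hunk above: entry->class and entry->distance are now set only once save_trace() has succeeded, presumably so that a failed trace capture bails out before the entry is filled in at all. The surrounding comment is about readers walking the list locklessly, and the general shape is "initialize fully on success, publish last". Below is a standalone sketch of that pattern with made-up names (struct dep_entry, capture_trace, add_dep); it models the shape only, it is not the kernel code.

#include <stdbool.h>
#include <stddef.h>

struct dep_entry {
        int class_idx;
        int distance;
        struct dep_entry *next;
};

static struct dep_entry pool[16];
static size_t pool_used;

/* Stands in for save_trace(): capturing a stack trace can fail. */
static bool capture_trace(struct dep_entry *e)
{
        (void)e;
        return true;
}

static bool add_dep(struct dep_entry **head, int class_idx, int distance)
{
        struct dep_entry *e;

        if (pool_used >= sizeof(pool) / sizeof(pool[0]))
                return false;
        e = &pool[pool_used++];

        if (!capture_trace(e))
                return false;           /* bail out before touching the entry */

        e->class_idx = class_idx;       /* fill in the fields only on success ... */
        e->distance = distance;
        e->next = *head;
        *head = e;                      /* ... and make the entry visible last */
        return true;
}

int main(void)
{
        struct dep_entry *deps = NULL;

        return add_dep(&deps, 1, 0) ? 0 : 1;
}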
@@ -2169,12 +2169,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
-       unsigned long ip;
 
-       time_hardirqs_on(CALLER_ADDR0, a0);
+       time_hardirqs_on(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2188,7 +2187,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
        }
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
-       ip = (unsigned long) __builtin_return_address(0);
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
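
The two hunks above make trace_hardirqs_on_caller() trust the instruction pointer handed in by its caller instead of recomputing __builtin_return_address(0) locally, so the wrapper one frame up decides what gets recorded; the same reasoning applies to the _off path further down. A minimal userspace sketch of that forwarding pattern (GCC builtins, made-up names; the kernel's CALLER_ADDR0 is assumed to expand to the same builtin):

#include <stdio.h>

/* The recorded ip is whatever the wrapper chose to pass, so reports
 * point at the real call site rather than a frame inside the tracer. */
static void trace_on_caller(unsigned long ip)
{
        printf("irqs enabled at %#lx\n", ip);
}

/* Thin wrapper: forward the address it will return to, i.e. the site
 * one frame above, much like trace_hardirqs_on() passing CALLER_ADDR0. */
static void __attribute__((noinline)) trace_on(void)
{
        trace_on_caller((unsigned long)__builtin_return_address(0));
}

int main(void)
{
        trace_on();
        return 0;
}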
@@ -2224,11 +2222,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       time_hardirqs_off(CALLER_ADDR0, a0);
+       time_hardirqs_off(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2241,7 +2239,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
                 * We have done an ON -> OFF transition:
                 */
                curr->hardirqs_enabled = 0;
-               curr->hardirq_disable_ip = _RET_IP_;
+               curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(&hardirqs_off_events);
        } else
@@ -2582,7 +2580,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
-       hlock->hardirqs_off = hardirqs_off;
+       hlock->hardirqs_off = !!hardirqs_off;
 #ifdef CONFIG_LOCK_STAT
        hlock->waittime_stamp = 0;
        hlock->holdtime_stamp = sched_clock();
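
The !! above matters if held_lock::hardirqs_off is narrower than the int being assigned, e.g. a one-bit bitfield: the caller passes an int that is not guaranteed to be 0 or 1, and without normalization a non-zero value whose bit 0 is clear would be truncated to 0. A standalone demonstration, assuming a single-bit bitfield destination:

#include <stdio.h>

struct flags {
        unsigned int hardirqs_off:1;    /* one-bit field, like the assumed held_lock layout */
};

int main(void)
{
        struct flags raw = { 0 }, normalized = { 0 };
        int hardirqs_off = 2;           /* non-zero, but bit 0 happens to be clear */

        raw.hardirqs_off = hardirqs_off;          /* truncated to 0: the flag is lost  */
        normalized.hardirqs_off = !!hardirqs_off; /* collapsed to 1: the flag survives */

        printf("raw=%u normalized=%u\n", raw.hardirqs_off, normalized.hardirqs_off);
        return 0;
}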
@@ -3029,7 +3027,7 @@ found_it:
 
        stats = get_lock_stats(hlock_class(hlock));
        if (point < ARRAY_SIZE(stats->contention_point))
-               stats->contention_point[i]++;
+               stats->contention_point[point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
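
In the hunk above, the counter has to be indexed by the contention point located just before the bounds check (the slot matching the contending call site), not by a leftover loop variable; otherwise a different slot gets credited, or the write lands outside the array even though point itself passed the ARRAY_SIZE() test. A standalone sketch of the find-slot-then-bump pattern, with made-up names:

#include <stdio.h>
#include <stddef.h>

#define NR_POINTS 4

struct fake_lock_stats {
        unsigned long ips[NR_POINTS];              /* call sites seen so far */
        unsigned long contention_point[NR_POINTS];
};

static void record_contention(struct fake_lock_stats *stats, unsigned long ip)
{
        size_t point;

        /* Find the slot for this call site, claiming a free one if needed. */
        for (point = 0; point < NR_POINTS; point++) {
                if (stats->ips[point] == ip || stats->ips[point] == 0) {
                        stats->ips[point] = ip;
                        break;
                }
        }

        /* Bump exactly the slot that was found; any other index would
         * credit the wrong call site or overflow when the table is full. */
        if (point < NR_POINTS)
                stats->contention_point[point]++;
}

int main(void)
{
        struct fake_lock_stats stats = { { 0 }, { 0 } };

        record_contention(&stats, 0x1234);
        record_contention(&stats, 0x1234);
        record_contention(&stats, 0x5678);
        printf("%lu %lu\n", stats.contention_point[0], stats.contention_point[1]);
        return 0;
}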
@@ -3417,9 +3415,10 @@ retry:
                }
                printk(" ignoring it.\n");
                unlock = 0;
+       } else {
+               if (count != 10)
+                       printk(KERN_CONT " locked it.\n");
        }
-       if (count != 10)
-               printk(" locked it.\n");
 
        do_each_thread(g, p) {
                /*