perf: Optimize perf_event_mmap_ctx()
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0bb246e..f5dcd36 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -49,7 +49,7 @@
 #include "lockdep_internals.h"
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/lockdep.h>
+#include <trace/events/lock.h>
 
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
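
The header rename above matters because lockdep.c defines CREATE_TRACE_POINTS immediately before the include: this is the single translation unit that instantiates the lock tracepoints, so it must name the new <trace/events/lock.h>. A hedged sketch of the general pattern (the "foo" names are illustrative, not part of this patch):

    /* foo.c: the one .c file that instantiates the tracepoints.
     * Defining CREATE_TRACE_POINTS before the include expands the
     * TRACE_EVENT() macros in the header into real tracepoint
     * definitions here; every other user includes the header
     * without it and gets declarations only.
     */
    #define CREATE_TRACE_POINTS
    #include <trace/events/foo.h>   /* hypothetical event header */
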
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+       return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
        int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
        return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
        if (time > lt->max)
                lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
        struct lock_class_stats *stats;
-       s64 holdtime;
+       u64 holdtime;
 
        if (!lock_stat)
                return;
 
-       holdtime = sched_clock() - hlock->holdtime_stamp;
+       holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
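
This hunk, together with the lockstat_clock() helper added above, moves every lockstat timestamp from raw sched_clock() to cpu_clock(smp_processor_id()). sched_clock() is fast but unsynchronized between CPUs, and a lock can be stamped on one CPU and released on another, so deltas could come out badly skewed or even negative; cpu_clock() bounds cross-CPU drift, and with sane deltas the durations can be plain unsigned u64 throughout, hence the s64 -> u64 conversions. A minimal sketch of the pattern, reusing the field and helper names from the diff (the demo function itself is illustrative):

    /* Stamp at acquire, unsigned delta at release.  The two clock
     * reads may happen on different CPUs, which is why a clock with
     * bounded cross-CPU drift matters here.
     */
    static void demo_holdtime(struct held_lock *hlock)
    {
            u64 holdtime;

            hlock->holdtime_stamp = lockstat_clock();       /* __lock_acquire() */
            /* ... critical section, possibly migrating CPUs ... */
            holdtime = lockstat_clock() - hlock->holdtime_stamp;
            /* holdtime would then be fed to lock_time_inc() */
    }
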
@@ -399,7 +404,6 @@ unsigned int nr_hardirq_chains;
 unsigned int nr_softirq_chains;
 unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
@@ -429,11 +433,8 @@ atomic_t redundant_softirqs_on;
 atomic_t redundant_softirqs_off;
 atomic_t nr_unused_locks;
 atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
 atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
 #endif
 
 /*
@@ -582,6 +583,9 @@ static int static_obj(void *obj)
        if ((addr >= start) && (addr < end))
                return 1;
 
+       if (arch_is_kernel_data(addr))
+               return 1;
+
 #ifdef CONFIG_SMP
        /*
         * percpu var?
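
static_obj() decides whether a lock lives in static storage, in which case lockdep may use the lock's address as its class key without explicit registration. The new arch_is_kernel_data() hook catches initialized data that an architecture's linker script places outside the core image range tested just above it (the addr >= start && addr < end check in the context). The asm-generic fallback is, roughly (a sketch, not copied from this patch):

    /* Architectures with unusual data-section layout supply their own
     * definition; the #ifndef then drops this no-op fallback.
     */
    #ifndef arch_is_kernel_data
    static inline int arch_is_kernel_data(unsigned long addr)
    {
            return 0;
    }
    #endif
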
@@ -861,14 +865,15 @@ struct circular_queue {
 };
 
 static struct circular_queue lock_cq;
-static unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 
 unsigned int max_bfs_queue_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
 static inline void __cq_init(struct circular_queue *cq)
 {
        cq->front = cq->rear = 0;
-       bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
+       lockdep_dependency_gen_id++;
 }
 
 static inline int __cq_empty(struct circular_queue *cq)
@@ -914,7 +919,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries);
        lock->parent = parent;
-       set_bit(nr, bfs_accessed);
+       lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
 
 static inline unsigned long lock_accessed(struct lock_list *lock)
@@ -923,7 +928,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 
        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries);
-       return test_bit(nr, bfs_accessed);
+       return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
 static inline struct lock_list *get_lock_parent(struct lock_list *child)
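
These three hunks replace the bfs_accessed bitmap with a generation counter. Previously __cq_init() had to bitmap_zero() a MAX_LOCKDEP_ENTRIES-bit map before every BFS over the dependency graph; now each lock class carries a dep_gen_id stamp, and bumping the global lockdep_dependency_gen_id invalidates all old marks in O(1). A class counts as visited only while its stamp equals the current generation. The trick in generic, self-contained form (all names here are illustrative):

    /* Generation-counter "visited" set: clearing is one increment,
     * membership is one comparison.
     */
    struct node {
            unsigned int gen;       /* generation that last visited us */
    };

    static unsigned int current_gen;

    static void visited_clear_all(void)
    {
            current_gen++;          /* O(1), replaces bitmap_zero() */
    }

    static void mark_visited(struct node *n)
    {
            n->gen = current_gen;
    }

    static int is_visited(struct node *n)
    {
            return n->gen == current_gen;
    }

The only cost is a theoretical stale hit if the counter wraps exactly onto an old stamp, once per 2^32 invalidations, in which case a single node is wrongly skipped for one search.
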
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
        hlock->waittime_stamp = 0;
-       hlock->holdtime_stamp = sched_clock();
+       hlock->holdtime_stamp = lockstat_clock();
 #endif
 
        if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
        if (hlock->instance != lock)
                return;
 
-       hlock->waittime_stamp = sched_clock();
+       hlock->waittime_stamp = lockstat_clock();
 
        contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
        contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       u64 now;
-       s64 waittime = 0;
+       u64 now, waittime = 0;
        int i, cpu;
 
        depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 
        cpu = smp_processor_id();
        if (hlock->waittime_stamp) {
-               now = sched_clock();
+               now = lockstat_clock();
                waittime = now - hlock->waittime_stamp;
                hlock->holdtime_stamp = now;
        }
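
Note the hand-off when the lock is finally obtained: the same "now" both closes the wait interval and opens the hold interval, so time spent contended is never double-counted as hold time, and the guard means the delta is only computed if we actually waited. Schematically, with the fields from the diff:

    if (hlock->waittime_stamp) {
            now = lockstat_clock();
            waittime = now - hlock->waittime_stamp; /* contended time  */
            hlock->holdtime_stamp = now;            /* hold starts now */
    }
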
@@ -3602,10 +3606,11 @@ void __init lockdep_info(void)
                sizeof(struct list_head) * CLASSHASH_SIZE +
                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-               sizeof(struct list_head) * CHAINHASH_SIZE) / 1024
+               sizeof(struct list_head) * CHAINHASH_SIZE
 #ifdef CONFIG_PROVE_LOCKING
-               + sizeof(struct circular_queue) + sizeof(bfs_accessed)
+               + sizeof(struct circular_queue)
 #endif
+               ) / 1024
                );
 
        printk(" per task-struct memory footprint: %lu bytes\n",