trace_hw_branches: rename per-cpu variables 'tracer'/'buffer' to 'hwb_tracer'/'hwb_buffer' to avoid per-cpu symbol name clashes, and pass the ring buffer (tr->buffer) to trace_buffer_lock_reserve/unlock_commit
[safe/jmp/linux-2.6] / kernel / trace / trace_hw_branches.c
index 23b6385..7b97000 100644 (file)
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-       per_cpu(tracer, cpu) =
-               ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-                                  NULL, (size_t)-1, BTS_KERNEL);
+       per_cpu(hwb_tracer, cpu) =
+               ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                  BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                  BTS_KERNEL);
 
-       if (IS_ERR(per_cpu(tracer, cpu)))
-               per_cpu(tracer, cpu) = NULL;
+       if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+               per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
        for_each_online_cpu(cpu) {
                bts_trace_init_cpu(cpu);
 
-               if (likely(per_cpu(tracer, cpu)))
+               if (likely(per_cpu(hwb_tracer, cpu)))
                        trace_hw_branches_enabled = 1;
        }
        trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu) {
-               if (likely(per_cpu(tracer, cpu))) {
-                       ds_release_bts(per_cpu(tracer, cpu));
-                       per_cpu(tracer, cpu) = NULL;
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
                }
        }
        trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_resume_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 0;
        put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_suspend_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 1;
        put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                        bts_trace_init_cpu(cpu);
 
                        if (trace_hw_branches_suspended &&
-                           likely(per_cpu(tracer, cpu)))
-                               ds_suspend_bts(per_cpu(tracer, cpu));
+                           likely(per_cpu(hwb_tracer, cpu)))
+                               ds_suspend_bts(per_cpu(hwb_tracer, cpu));
                }
                break;
 
        case CPU_DOWN_PREPARE:
                /* The notification is sent with interrupts enabled. */
-               if (likely(per_cpu(tracer, cpu))) {
-                       ds_release_bts(per_cpu(tracer, cpu));
-                       per_cpu(tracer, cpu) = NULL;
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
                }
        }
 
@@ -165,6 +166,7 @@ void trace_hw_branch(u64 from, u64 to)
        struct ftrace_event_call *call = &event_hw_branch;
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
+       struct ring_buffer *buf;
        struct hw_branch_entry *entry;
        unsigned long irq1;
        int cpu;
@@ -180,7 +182,8 @@ void trace_hw_branch(u64 from, u64 to)
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+       buf = tr->buffer;
+       event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
@@ -189,8 +192,8 @@ void trace_hw_branch(u64 from, u64 to)
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to   = to;
-       if (!filter_check_discard(call, entry, tr->buffer, event))
-               trace_buffer_unlock_commit(tr, event, 0, 0);
+       if (!filter_check_discard(call, entry, buf, event))
+               trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
@@ -256,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_suspend_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        /*
         * We need to collect the trace on the respective cpu since ftrace
         * implicitly adds the record for the current cpu.
@@ -266,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
        on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_resume_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
        put_online_cpus();
 }