diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e3e7db6..7b97000 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
 /*
- * h/w branch tracer for x86 based on bts
+ * h/w branch tracer for x86 based on BTS
  *
  * Copyright (C) 2008-2009 Intel Corporation.
  * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- *
  */
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-#include <linux/mutex.h>
+#include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/fs.h>
 
 #include <asm/ds.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
 
-#define SIZEOF_BTS (1 << 13)
+#define BTS_BUFFER_SIZE (1 << 13)
 
-/* The tracer mutex protects the below per-cpu tracer array.
-   It needs to be held to:
-   - start tracing on all cpus
-   - stop tracing on all cpus
-   - start tracing on a single hotplug cpu
-   - stop tracing on a single hotplug cpu
-   - read the trace from all cpus
-   - read the trace from a single cpu
-*/
-static DEFINE_MUTEX(bts_tracer_mutex);
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
-static int __read_mostly trace_hw_branches_enabled;
+static int trace_hw_branches_enabled __read_mostly;
+static int trace_hw_branches_suspended __read_mostly;
 static struct trace_array *hw_branch_trace __read_mostly;
 
 
-/*
- * Start tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_mutex must be locked.
- */
-static void bts_trace_start_cpu(void *arg)
+static void bts_trace_init_cpu(int cpu)
 {
-       if (this_tracer)
-               ds_release_bts(this_tracer);
-
-       this_tracer =
-               ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
-                              /* ovfl = */ NULL, /* th = */ (size_t)-1,
-                              BTS_KERNEL);
-       if (IS_ERR(this_tracer)) {
-               this_tracer = NULL;
-               return;
-       }
+       per_cpu(hwb_tracer, cpu) =
+               ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                  BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                  BTS_KERNEL);
+
+       if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+               per_cpu(hwb_tracer, cpu) = NULL;
 }
 
-static void bts_trace_start(struct trace_array *tr)
+static int bts_trace_init(struct trace_array *tr)
 {
-       mutex_lock(&bts_tracer_mutex);
+       int cpu;
 
-       on_each_cpu(bts_trace_start_cpu, NULL, 1);
-       trace_hw_branches_enabled = 1;
+       hw_branch_trace = tr;
+       trace_hw_branches_enabled = 0;
 
-       mutex_unlock(&bts_tracer_mutex);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               bts_trace_init_cpu(cpu);
+
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       trace_hw_branches_enabled = 1;
+       }
+       trace_hw_branches_suspended = 0;
+       put_online_cpus();
+
+       /* If we could not enable tracing on a single cpu, we fail. */
+       return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
 }
 
-/*
- * Start tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_mutex must be locked.
- */
-static void bts_trace_stop_cpu(void *arg)
+static void bts_trace_reset(struct trace_array *tr)
 {
-       if (this_tracer) {
-               ds_release_bts(this_tracer);
-               this_tracer = NULL;
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
+               }
        }
+       trace_hw_branches_enabled = 0;
+       trace_hw_branches_suspended = 0;
+       put_online_cpus();
 }
 
-static void bts_trace_stop(struct trace_array *tr)
+static void bts_trace_start(struct trace_array *tr)
 {
-       mutex_lock(&bts_tracer_mutex);
+       int cpu;
 
-       trace_hw_branches_enabled = 0;
-       on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
+       trace_hw_branches_suspended = 0;
+       put_online_cpus();
+}
 
-       mutex_unlock(&bts_tracer_mutex);
+static void bts_trace_stop(struct trace_array *tr)
+{
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+       trace_hw_branches_suspended = 1;
+       put_online_cpus();
 }
 
 static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                                     unsigned long action, void *hcpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
-
-       mutex_lock(&bts_tracer_mutex);
-
-       if (!trace_hw_branches_enabled)
-               goto out;
+       int cpu = (long)hcpu;
 
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
-               smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+               /* The notification is sent with interrupts enabled. */
+               if (trace_hw_branches_enabled) {
+                       bts_trace_init_cpu(cpu);
+
+                       if (trace_hw_branches_suspended &&
+                           likely(per_cpu(hwb_tracer, cpu)))
+                               ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+               }
                break;
+
        case CPU_DOWN_PREPARE:
-               smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
-               break;
+               /* The notification is sent with interrupts enabled. */
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
+               }
        }
 
- out:
-       mutex_unlock(&bts_tracer_mutex);
        return NOTIFY_DONE;
 }
 
@@ -127,22 +135,6 @@ static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
        .notifier_call = bts_hotcpu_handler
 };
 
-static int bts_trace_init(struct trace_array *tr)
-{
-       hw_branch_trace = tr;
-
-       register_hotcpu_notifier(&bts_hotcpu_notifier);
-       bts_trace_start(tr);
-
-       return 0;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-       bts_trace_stop(tr);
-       unregister_hotcpu_notifier(&bts_hotcpu_notifier);
-}
-
 static void bts_trace_print_header(struct seq_file *m)
 {
        seq_puts(m, "# CPU#        TO  <-  FROM\n");
@@ -150,10 +142,10 @@ static void bts_trace_print_header(struct seq_file *m)
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 {
+       unsigned long symflags = TRACE_ITER_SYM_OFFSET;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *seq = &iter->seq;
        struct hw_branch_entry *it;
-       unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
        trace_assign_type(it, entry);
 
@@ -164,15 +156,17 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
                    seq_print_ip_sym(seq, it->from, symflags) &&
                    trace_seq_printf(seq, "\n"))
                        return TRACE_TYPE_HANDLED;
-               return TRACE_TYPE_PARTIAL_LINE;;
+               return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_UNHANDLED;
 }
 
 void trace_hw_branch(u64 from, u64 to)
 {
+       struct ftrace_event_call *call = &event_hw_branch;
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
+       struct ring_buffer *buf;
        struct hw_branch_entry *entry;
        unsigned long irq1;
        int cpu;
@@ -188,7 +182,8 @@ void trace_hw_branch(u64 from, u64 to)
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+       buf = tr->buffer;
+       event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
@@ -197,7 +192,8 @@ void trace_hw_branch(u64 from, u64 to)
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to   = to;
-       trace_buffer_unlock_commit(tr, event, 0, 0);
+       if (!filter_check_discard(call, entry, buf, event))
+               trace_buffer_unlock_commit(buf, event, 0, 0);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
@@ -227,11 +223,11 @@ static void trace_bts_at(const struct bts_trace *trace, void *at)
 /*
  * Collect the trace on the current cpu and write it into the ftrace buffer.
  *
- * pre: bts_tracer_mutex must be locked
+ * pre: tracing must be suspended on the current cpu
  */
 static void trace_bts_cpu(void *arg)
 {
-       struct trace_array *tr = (struct trace_array *) arg;
+       struct trace_array *tr = (struct trace_array *)arg;
        const struct bts_trace *trace;
        unsigned char *at;
 
@@ -244,10 +240,9 @@ static void trace_bts_cpu(void *arg)
        if (unlikely(!this_tracer))
                return;
 
-       ds_suspend_bts(this_tracer);
        trace = ds_read_bts(this_tracer);
        if (!trace)
-               goto out;
+               return;
 
        for (at = trace->ds.top; (void *)at < trace->ds.end;
             at += trace->ds.size)
@@ -256,18 +251,27 @@ static void trace_bts_cpu(void *arg)
        for (at = trace->ds.begin; (void *)at < trace->ds.top;
             at += trace->ds.size)
                trace_bts_at(trace, at);
-
-out:
-       ds_resume_bts(this_tracer);
 }
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-       mutex_lock(&bts_tracer_mutex);
+       int cpu;
 
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
+       /*
+        * We need to collect the trace on the respective cpu since ftrace
+        * implicitly adds the record for the current cpu.
+        * Once that is more flexible, we could collect the data from any cpu.
+        */
        on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
-       mutex_unlock(&bts_tracer_mutex);
+       for_each_online_cpu(cpu)
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
+       put_online_cpus();
 }
 
 static void trace_bts_close(struct trace_iterator *iter)
@@ -277,11 +281,11 @@ static void trace_bts_close(struct trace_iterator *iter)
 
 void trace_hw_branch_oops(void)
 {
-       mutex_lock(&bts_tracer_mutex);
-
-       trace_bts_cpu(hw_branch_trace);
-
-       mutex_unlock(&bts_tracer_mutex);
+       if (this_tracer) {
+               ds_suspend_bts_noirq(this_tracer);
+               trace_bts_cpu(hw_branch_trace);
+               ds_resume_bts_noirq(this_tracer);
+       }
 }
 
 struct tracer bts_tracer __read_mostly =
@@ -294,11 +298,15 @@ struct tracer bts_tracer __read_mostly =
        .start          = bts_trace_start,
        .stop           = bts_trace_stop,
        .open           = trace_bts_prepare,
-       .close          = trace_bts_close
+       .close          = trace_bts_close,
+#ifdef CONFIG_FTRACE_SELFTEST
+       .selftest       = trace_selftest_startup_hw_branches,
+#endif /* CONFIG_FTRACE_SELFTEST */
 };
 
 __init static int init_bts_trace(void)
 {
+       register_hotcpu_notifier(&bts_hotcpu_notifier);
        return register_tracer(&bts_tracer);
 }
 device_initcall(init_bts_trace);
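
The core of the change above: the global bts_tracer_mutex is gone, and the per-cpu BTS tracers are now set up, suspended, resumed, and torn down under get_online_cpus()/put_online_cpus(), with a CPU-hotplug notifier registered once at init time to handle CPUs that come and go later. A minimal sketch of that pattern, independent of the DS/BTS calls and using hypothetical names (my_state, my_start_all, my_hotcpu_handler), looks like this:

	/*
	 * Illustrative sketch only, not part of the patch: per-cpu state
	 * walked under get_online_cpus()/put_online_cpus() instead of a
	 * global mutex, plus a hotplug notifier for later transitions.
	 */
	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/percpu.h>
	#include <linux/init.h>

	static DEFINE_PER_CPU(int, my_state);

	static void my_start_all(void)
	{
		int cpu;

		get_online_cpus();	/* block CPU hotplug while walking the mask */
		for_each_online_cpu(cpu)
			per_cpu(my_state, cpu) = 1;
		put_online_cpus();
	}

	static int __cpuinit my_hotcpu_handler(struct notifier_block *nfb,
					       unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		case CPU_ONLINE:
		case CPU_DOWN_FAILED:
			/* bring a newly onlined cpu up to date */
			per_cpu(my_state, cpu) = 1;
			break;
		case CPU_DOWN_PREPARE:
			/* tear down before the cpu goes away */
			per_cpu(my_state, cpu) = 0;
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_hotcpu_notifier __cpuinitdata = {
		.notifier_call = my_hotcpu_handler,
	};
	/* registered once at init: register_hotcpu_notifier(&my_hotcpu_notifier); */

Because get_online_cpus() excludes hotplug while the online mask is walked, and the notifier catches transitions that happen afterwards, no additional lock is needed around the per-cpu tracer array; the patch relies on the same reasoning when it drops bts_tracer_mutex.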