perf_counter: hook up the tracepoint events
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 19 Mar 2009 19:26:17 +0000 (20:26 +0100)
committerIngo Molnar <mingo@elte.hu>
Mon, 6 Apr 2009 07:30:16 +0000 (09:30 +0200)
Impact: new perfcounters feature

Enable usage of tracepoints as perf counter events.

tracepoint event ids can be found in /debug/tracing/events/*/*/id
and (for now) are represented as -65536+id in the type field.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.744044174@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_counter.h
init/Kconfig
kernel/perf_counter.c

index 08c11a6..065984c 100644 (file)
@@ -53,6 +53,8 @@ enum hw_event_types {
        PERF_COUNT_PAGE_FAULTS_MAJ      = -7,
 
        PERF_SW_EVENTS_MIN              = -8,
+
+       PERF_TP_EVENTS_MIN              = -65536
 };
 
 /*
@@ -222,6 +224,7 @@ struct perf_counter {
        struct perf_data                *usrdata;
        struct perf_data                data[2];
 
+       void (*destroy)(struct perf_counter *);
        struct rcu_head                 rcu_head;
 #endif
 };
index 38a2ecd..4f64714 100644 (file)
@@ -947,6 +947,11 @@ config PERF_COUNTERS
 
          Say Y if unsure.
 
+config EVENT_PROFILE
+       bool "Tracepoint profile sources"
+       depends on PERF_COUNTERS && EVENT_TRACER
+       default y
+
 endmenu
 
 config VM_EVENT_COUNTERS
index 97f891f..0bbe3e4 100644 (file)
@@ -1152,6 +1152,9 @@ static void free_counter_rcu(struct rcu_head *head)
 
 static void free_counter(struct perf_counter *counter)
 {
+       if (counter->destroy)
+               counter->destroy(counter);
+
        call_rcu(&counter->rcu_head, free_counter_rcu);
 }
 
@@ -1727,6 +1730,45 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
        .read           = cpu_migrations_perf_counter_read,
 };
 
+#ifdef CONFIG_EVENT_PROFILE
+void perf_tpcounter_event(int event_id)
+{
+       perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1,
+                       task_pt_regs(current));
+}
+
+extern int ftrace_profile_enable(int);
+extern void ftrace_profile_disable(int);
+
+static void tp_perf_counter_destroy(struct perf_counter *counter)
+{
+       int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+
+       ftrace_profile_disable(event_id);
+}
+
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+       int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+       int ret;
+
+       ret = ftrace_profile_enable(event_id);
+       if (ret)
+               return NULL;
+
+       counter->destroy = tp_perf_counter_destroy;
+
+       return &perf_ops_generic;
+}
+#else
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+       return NULL;
+}
+#endif
+
 static const struct hw_perf_counter_ops *
 sw_perf_counter_init(struct perf_counter *counter)
 {
@@ -1772,6 +1814,7 @@ sw_perf_counter_init(struct perf_counter *counter)
                        hw_ops = &perf_ops_cpu_migrations;
                break;
        default:
+               hw_ops = tp_perf_counter_init(counter);
                break;
        }