perf: Drop the obsolete profile naming for trace events
author	Frederic Weisbecker <fweisbec@gmail.com>
Fri, 5 Mar 2010 04:35:37 +0000 (05:35 +0100)
committer	Frederic Weisbecker <fweisbec@gmail.com>
Wed, 10 Mar 2010 13:47:18 +0000 (14:47 +0100)
Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple event counting, so generalize
the API naming.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
include/linux/ftrace_event.h
include/linux/syscalls.h
include/trace/ftrace.h
include/trace/syscall.h
kernel/perf_event.c
kernel/trace/Makefile
kernel/trace/trace_event_perf.c [moved from kernel/trace/trace_event_profile.c with 73% similarity]
kernel/trace/trace_events.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_syscalls.c
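
For orientation, a rough sketch of the perf handler that DECLARE_EVENT_CLASS()/DEFINE_EVENT() generate after this rename, written out for a hypothetical event "foo" carrying a single u64 field "bar". The event, struct and field names are illustrative only; the helpers and the PERF_MAX_TRACE_SIZE limit are the ones renamed in the diff below.

/* Illustrative expansion only, not part of the patch */
static notrace void perf_trace_foo(u64 bar)
{
	struct ftrace_event_call *event_call = &event_foo;	/* hypothetical event */
	struct ftrace_raw_foo *entry;				/* hypothetical record layout */
	struct pt_regs *__regs;
	unsigned long irq_flags;
	u64 __addr = 0, __count = 1;
	int __entry_size;
	int rctx;

	/* Round the record up to a u64 boundary, minus the u32 size word */
	__entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64));
	__entry_size -= sizeof(u32);

	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* was ftrace_perf_buf_prepare() */
	entry = perf_trace_buf_prepare(__entry_size, event_call->id,
				       &rctx, &irq_flags);
	if (!entry)
		return;

	entry->bar = bar;

	/* Grab the caller's registers and hand the record to perf */
	__regs = &__get_cpu_var(perf_trace_regs);
	perf_fetch_caller_regs(__regs, 2);

	/* was ftrace_perf_buf_submit() */
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,
			      __count, irq_flags, __regs);
}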

index ac424f1..c0f4b36 100644 (file)
@@ -131,12 +131,12 @@ struct ftrace_event_call {
        void                    *mod;
        void                    *data;
 
-       int                     profile_count;
-       int                     (*profile_enable)(struct ftrace_event_call *);
-       void                    (*profile_disable)(struct ftrace_event_call *);
+       int                     perf_refcount;
+       int                     (*perf_event_enable)(struct ftrace_event_call *);
+       void                    (*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE        2048
+#define PERF_MAX_TRACE_SIZE    2048
 
 #define MAX_FILTER_PRED                32
 #define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
@@ -190,17 +190,17 @@ struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
                         unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                       u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
        struct trace_entry *entry = raw_data;
index 8126f23..51435bc 100644 (file)
@@ -101,18 +101,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)                                   \
-       .profile_enable = prof_sysenter_enable,                                \
-       .profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname)                                      \
+       .perf_event_enable = perf_sysenter_enable,                             \
+       .perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)                                    \
-       .profile_enable = prof_sysexit_enable,                                 \
-       .profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname)                                               \
+       .perf_event_enable = perf_sysexit_enable,                              \
+       .perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -149,7 +149,7 @@ struct perf_event_attr;
                .regfunc                = reg_event_syscall_enter,      \
                .unregfunc              = unreg_event_syscall_enter,    \
                .data                   = (void *)&__syscall_meta_##sname,\
-               TRACE_SYS_ENTER_PROFILE_INIT(sname)                     \
+               TRACE_SYS_ENTER_PERF_INIT(sname)                        \
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
@@ -171,7 +171,7 @@ struct perf_event_attr;
                .regfunc                = reg_event_syscall_exit,       \
                .unregfunc              = unreg_event_syscall_exit,     \
                .data                   = (void *)&__syscall_meta_##sname,\
-               TRACE_SYS_EXIT_PROFILE_INIT(sname)                      \
+               TRACE_SYS_EXIT_PERF_INIT(sname)                 \
        }
 
 #define SYSCALL_METADATA(sname, nb)                            \
index f31bb8b..25ab56f 100644 (file)
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call(                      \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)                      \
                                                                        \
-static void ftrace_profile_##name(proto);                              \
+static void perf_trace_##name(proto);                                  \
                                                                        \
 static notrace int                                                     \
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)         \
+perf_trace_enable_##name(struct ftrace_event_call *unused)             \
 {                                                                      \
-       return register_trace_##name(ftrace_profile_##name);            \
+       return register_trace_##name(perf_trace_##name);                \
 }                                                                      \
                                                                        \
 static notrace void                                                    \
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)                \
+perf_trace_disable_##name(struct ftrace_event_call *unused)            \
 {                                                                      \
-       unregister_trace_##name(ftrace_profile_##name);                 \
+       unregister_trace_##name(perf_trace_##name);                     \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)           \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)                                      \
-       .profile_enable = ftrace_profile_enable_##call,                 \
-       .profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)                                         \
+       .perf_event_enable = perf_trace_enable_##call,                  \
+       .perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {         \
        .unregfunc              = ftrace_raw_unreg_event_##call,        \
        .print_fmt              = print_fmt_##template,                 \
        .define_fields          = ftrace_define_fields_##template,      \
-       _TRACE_PROFILE_INIT(call)                                       \
+       _TRACE_PERF_INIT(call)                                  \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = {               \
        .unregfunc              = ftrace_raw_unreg_event_##call,        \
        .print_fmt              = print_fmt_##call,                     \
        .define_fields          = ftrace_define_fields_##template,      \
-       _TRACE_PROFILE_INIT(call)                                       \
+       _TRACE_PERF_INIT(call)                                  \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *     struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *     struct ftrace_event_call *event_call = &event_<call>;
@@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {         \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void                                                    \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,      \
+perf_trace_templ_##call(struct ftrace_event_call *event_call,          \
                            proto)                                      \
 {                                                                      \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
-       if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,           \
+       if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,               \
                      "profile buffer not large enough"))               \
                return;                                                 \
-       entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(    \
+       entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(     \
                __entry_size, event_call->id, &rctx, &irq_flags);       \
        if (!entry)                                                     \
                return;                                                 \
@@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
        __regs = &__get_cpu_var(perf_trace_regs);                       \
        perf_fetch_caller_regs(__regs, 2);                              \
                                                                        \
-       ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,       \
+       perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
                               __count, irq_flags, __regs);             \
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)              \
-static notrace void ftrace_profile_##call(proto)               \
+static notrace void perf_trace_##call(proto)                   \
 {                                                              \
        struct ftrace_event_call *event_call = &event_##call;   \
                                                                \
-       ftrace_profile_templ_##template(event_call, args);      \
+       perf_trace_templ_##template(event_call, args);          \
 }
 
 #undef DEFINE_EVENT_PRINT
index 0387100..e5e5f48 100644 (file)
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
index 45b4b6e..c502b18 100644 (file)
@@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-       ftrace_profile_disable(event->attr.config);
+       perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
                        !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
 
-       if (ftrace_profile_enable(event->attr.config))
+       if (perf_trace_enable(event->attr.config))
                return NULL;
 
        event->destroy = tp_perf_event_destroy;
index d00c6fe..78edc64 100644 (file)
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
similarity index 73%
rename from kernel/trace/trace_event_profile.c
rename to kernel/trace/trace_event_perf.c
index e66d21e..f315b12 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -14,20 +14,20 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int     total_profile_count;
+static int     total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
        char *buf;
        int ret = -ENOMEM;
 
-       if (event->profile_count++ > 0)
+       if (event->perf_refcount++ > 0)
                return 0;
 
-       if (!total_profile_count) {
+       if (!total_ref_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;
@@ -41,35 +41,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }
 
-       ret = event->profile_enable(event);
+       ret = event->perf_event_enable(event);
        if (!ret) {
-               total_profile_count++;
+               total_ref_count++;
                return 0;
        }
 
 fail_buf_nmi:
-       if (!total_profile_count) {
+       if (!total_ref_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
 fail_buf:
-       event->profile_count--;
+       event->perf_refcount--;
 
        return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
        struct ftrace_event_call *event;
        int ret = -EINVAL;
 
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
-               if (event->id == event_id && event->profile_enable &&
+               if (event->id == event_id && event->perf_event_enable &&
                    try_module_get(event->mod)) {
-                       ret = ftrace_profile_enable_event(event);
+                       ret = perf_trace_event_enable(event);
                        break;
                }
        }
@@ -78,16 +78,16 @@ int ftrace_profile_enable(int event_id)
        return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
        char *buf, *nmi_buf;
 
-       if (--event->profile_count > 0)
+       if (--event->perf_refcount > 0)
                return;
 
-       event->profile_disable(event);
+       event->perf_event_disable(event);
 
-       if (!--total_profile_count) {
+       if (!--total_ref_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -105,14 +105,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
        }
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
        struct ftrace_event_call *event;
 
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
-                       ftrace_profile_disable_event(event);
+                       perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
@@ -120,8 +120,8 @@ void ftrace_profile_disable(int event_id)
        mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-                                       int *rctxp, unsigned long *irq_flags)
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+                                      int *rctxp, unsigned long *irq_flags)
 {
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
@@ -162,4 +162,4 @@ err_recursion:
        local_irq_restore(*irq_flags);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
index 3f972ad..beab8bf 100644 (file)
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);
 
-       if (call->id && call->profile_enable)
+       if (call->id && call->perf_event_enable)
                trace_create_file("id", 0444, call->dir, call,
                                  id);
 
index f7a20a8..1251e36 100644 (file)
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
                                         struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
        __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                     "profile buffer not large enough"))
                return;
 
-       entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-       ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+       perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
                                            struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
        __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                     "profile buffer not large enough"))
                return;
 
-       entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-       ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+       perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
                               irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
        struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
                return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
        struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
                kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_profile_func(kp, regs);
+               kprobe_perf_func(kp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
                kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_profile_func(ri, regs);
+               kretprobe_perf_func(ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
        call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-       call->profile_enable = probe_profile_enable;
-       call->profile_disable = probe_profile_disable;
+       call->perf_event_enable = probe_perf_enable;
+       call->perf_event_disable = probe_perf_disable;
 #endif
        call->data = tp;
        ret = trace_add_event_call(call);
index 7e6e84f..33c2a5b 100644 (file)
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
 
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-                     "profile buffer not large enough"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+                     "perf buffer not large enough"))
                return;
 
-       rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+       rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                                sys_data->enter_event->id, &rctx, &flags);
        if (!rec)
                return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                               (unsigned long *)&rec->args);
-       ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_enter)
-               ret = register_trace_sys_enter(prof_syscall_enter);
+       if (!sys_perf_refcount_enter)
+               ret = register_trace_sys_enter(perf_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall entry trace point");
        } else {
-               set_bit(num, enabled_prof_enter_syscalls);
-               sys_prof_refcount_enter++;
+               set_bit(num, enabled_perf_enter_syscalls);
+               sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_enter--;
-       clear_bit(num, enabled_prof_enter_syscalls);
-       if (!sys_prof_refcount_enter)
-               unregister_trace_sys_enter(prof_syscall_enter);
+       sys_perf_refcount_enter--;
+       clear_bit(num, enabled_perf_enter_syscalls);
+       if (!sys_perf_refcount_enter)
+               unregister_trace_sys_enter(perf_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
        int size;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-               "exit event has grown above profile buffer size"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+               "exit event has grown above perf buffer size"))
                return;
 
-       rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+       rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                                sys_data->exit_event->id, &rctx, &flags);
        if (!rec)
                return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
 
-       ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_exit)
-               ret = register_trace_sys_exit(prof_syscall_exit);
+       if (!sys_perf_refcount_exit)
+               ret = register_trace_sys_exit(perf_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall exit trace point");
        } else {
-               set_bit(num, enabled_prof_exit_syscalls);
-               sys_prof_refcount_exit++;
+               set_bit(num, enabled_perf_exit_syscalls);
+               sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_exit--;
-       clear_bit(num, enabled_prof_exit_syscalls);
-       if (!sys_prof_refcount_exit)
-               unregister_trace_sys_exit(prof_syscall_exit);
+       sys_perf_refcount_exit--;
+       clear_bit(num, enabled_perf_exit_syscalls);
+       if (!sys_perf_refcount_exit)
+               unregister_trace_sys_exit(perf_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
 }