Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8cdda95..4d6d711 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,6 @@
 #include <trace/syscall.h>
 #include <trace/events/syscalls.h>
+#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/ftrace.h>
 #include <linux/perf_event.h>
@@ -394,6 +395,11 @@ int init_syscall_trace(struct ftrace_event_call *call)
        return id;
 }
 
+unsigned long __init arch_syscall_addr(int nr)
+{
+       return (unsigned long)sys_call_table[nr];
+}
+
 int __init init_ftrace_syscalls(void)
 {
        struct syscall_metadata *meta;
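[The arch_syscall_addr() added above is the generic fallback that maps a
syscall number to its handler's address; architectures whose sys_call_table
needs special decoding can supply their own. For context, this is roughly how
init_ftrace_syscalls() consumes it to wire up per-syscall metadata — a trimmed
sketch, not the verbatim function; find_syscall_meta() is this file's internal
lookup helper, and the kzalloc()ed table is what the new <linux/slab.h>
include is for:

    for (i = 0; i < NR_syscalls; i++) {
            addr = arch_syscall_addr(i);    /* handler address for syscall i */
            meta = find_syscall_meta(addr); /* match address to its metadata */
            if (!meta)
                    continue;

            meta->syscall_nr = i;
            syscalls_metadata[i] = meta;    /* table kzalloc()ed just above */
    }
]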
@@ -421,27 +427,24 @@ int __init init_ftrace_syscalls(void)
 }
 core_initcall(init_ftrace_syscalls);
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        unsigned long flags;
-       char *trace_buf;
-       char *raw_data;
        int syscall_nr;
        int rctx;
        int size;
-       int cpu;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -453,44 +456,22 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
 
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-                     "profile buffer not large enough"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+                     "perf buffer not large enough"))
                return;
 
-       /* Protect the per cpu buffer, begin the rcu read side */
-       local_irq_save(flags);
-
-       rctx = perf_swevent_get_recursion_context();
-       if (rctx < 0)
-               goto end_recursion;
-
-       cpu = smp_processor_id();
-
-       trace_buf = rcu_dereference(perf_trace_buf);
-
-       if (!trace_buf)
-               goto end;
-
-       raw_data = per_cpu_ptr(trace_buf, cpu);
-
-       /* zero the dead bytes from align to not leak stack to user */
-       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+       rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
+                               sys_data->enter_event->id, &rctx, &flags);
+       if (!rec)
+               return;
 
-       rec = (struct syscall_trace_enter *) raw_data;
-       tracing_generic_entry_update(&rec->ent, 0, 0);
-       rec->ent.type = sys_data->enter_event->id;
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                               (unsigned long *)&rec->args);
-       perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
-
-end:
-       perf_swevent_put_recursion_context(rctx);
-end_recursion:
-       local_irq_restore(flags);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
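[The rewritten handler above is the heart of this patch: the open-coded
irq/recursion/per-cpu-buffer dance is folded into a prepare/submit pair. A
minimal sketch of the resulting pattern, with the signatures exactly as used
in the hunks above; the handler name, record type, and event id below are
placeholders, not names from the patch:

    static void my_perf_handler(struct pt_regs *regs)
    {
            struct my_trace_record *rec;
            unsigned long flags;
            int rctx;
            int size;

            /* perf records carry a u32 size header; pad the payload so
             * the total stays u64-aligned */
            size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
            size -= sizeof(u32);

            /* prepare: disables irqs, takes the recursion context, zeroes
             * the alignment padding, and fills the common entry fields --
             * the steps the deleted lines used to do by hand */
            rec = (struct my_trace_record *)perf_trace_buf_prepare(size,
                                    my_event_id, &rctx, &flags);
            if (!rec)
                    return;

            /* ... fill the event-specific fields of *rec here ... */

            /* submit: hands the record to perf and unwinds prepare's state */
            perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
    }
]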
@@ -498,47 +479,44 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_enter)
-               ret = register_trace_sys_enter(prof_syscall_enter);
+       if (!sys_perf_refcount_enter)
+               ret = register_trace_sys_enter(perf_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall entry trace point");
        } else {
-               set_bit(num, enabled_prof_enter_syscalls);
-               sys_prof_refcount_enter++;
+               set_bit(num, enabled_perf_enter_syscalls);
+               sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_enter--;
-       clear_bit(num, enabled_prof_enter_syscalls);
-       if (!sys_prof_refcount_enter)
-               unregister_trace_sys_enter(prof_syscall_enter);
+       sys_perf_refcount_enter--;
+       clear_bit(num, enabled_perf_enter_syscalls);
+       if (!sys_perf_refcount_enter)
+               unregister_trace_sys_enter(perf_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        unsigned long flags;
        int syscall_nr;
-       char *trace_buf;
-       char *raw_data;
        int rctx;
        int size;
-       int cpu;
 
        syscall_nr = syscall_get_nr(current, regs);
-       if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+       if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -553,45 +531,22 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
-       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-               "exit event has grown above profile buffer size"))
+       if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+               "exit event has grown above perf buffer size"))
                return;
 
-       /* Protect the per cpu buffer, begin the rcu read side */
-       local_irq_save(flags);
-
-       rctx = perf_swevent_get_recursion_context();
-       if (rctx < 0)
-               goto end_recursion;
-
-       cpu = smp_processor_id();
-
-       trace_buf = rcu_dereference(perf_trace_buf);
-
-       if (!trace_buf)
-               goto end;
-
-       raw_data = per_cpu_ptr(trace_buf, cpu);
-
-       /* zero the dead bytes from align to not leak stack to user */
-       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
-       rec = (struct syscall_trace_exit *)raw_data;
+       rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
+                               sys_data->exit_event->id, &rctx, &flags);
+       if (!rec)
+               return;
 
-       tracing_generic_entry_update(&rec->ent, 0, 0);
-       rec->ent.type = sys_data->exit_event->id;
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
 
-       perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
-
-end:
-       perf_swevent_put_recursion_context(rctx);
-end_recursion:
-       local_irq_restore(flags);
+       perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -599,33 +554,32 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       if (!sys_prof_refcount_exit)
-               ret = register_trace_sys_exit(prof_syscall_exit);
+       if (!sys_perf_refcount_exit)
+               ret = register_trace_sys_exit(perf_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate"
                                "syscall exit trace point");
        } else {
-               set_bit(num, enabled_prof_exit_syscalls);
-               sys_prof_refcount_exit++;
+               set_bit(num, enabled_perf_exit_syscalls);
+               sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
        mutex_lock(&syscall_trace_lock);
-       sys_prof_refcount_exit--;
-       clear_bit(num, enabled_prof_exit_syscalls);
-       if (!sys_prof_refcount_exit)
-               unregister_trace_sys_exit(prof_syscall_exit);
+       sys_perf_refcount_exit--;
+       clear_bit(num, enabled_perf_exit_syscalls);
+       if (!sys_perf_refcount_exit)
+               unregister_trace_sys_exit(perf_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
 }
 
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
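
[Finally, note the enable/disable idiom that survives the rename: one global
refcount per direction decides when the single tracepoint probe is registered,
while a per-event bitmap decides which syscalls the probe actually reports. A
generic sketch of the same idiom, mirroring perf_sysenter_enable()/disable()
above; every name here is illustrative, not from the patch:

    static DECLARE_BITMAP(enabled, NR_EVENTS);
    static int refcount;
    static DEFINE_MUTEX(my_lock);

    int my_enable(int num)
    {
            int ret = 0;

            mutex_lock(&my_lock);
            if (!refcount)                  /* first user hooks the probe */
                    ret = register_my_probe();
            if (!ret) {
                    set_bit(num, enabled);  /* probe tests this bit early */
                    refcount++;
            }
            mutex_unlock(&my_lock);
            return ret;
    }

    void my_disable(int num)
    {
            mutex_lock(&my_lock);
            refcount--;
            clear_bit(num, enabled);
            if (!refcount)                  /* last user unhooks the probe */
                    unregister_my_probe();
            mutex_unlock(&my_lock);
    }
]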