trace: Remove unused trace_array_cpu parameter
kernel/trace/trace_functions.c
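This is trace_functions.c after the change named in the commit title: trace_function() no longer takes the per-CPU struct trace_array_cpu *data argument, so the callbacks below pass only the trace_array along with ip, parent_ip, flags and pc (the per-CPU data is still looked up locally, but only for the ->disabled nesting counter). A minimal before/after sketch of a call site, where the "before" prototype with the extra data pointer is assumed rather than shown here:

        /* before (assumed old prototype, with the now-removed data pointer): */
        trace_function(tr, data, ip, parent_ip, flags, pc);
        /* after (as used in the callbacks below): */
        trace_function(tr, ip, parent_ip, flags, pc);

The full file after the change follows.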
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int                      ftrace_function_enabled;

static struct trace_array       *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

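/*
 * Reset the per-CPU ring buffers, start recording task cmdlines (so
 * PIDs can later be resolved to comm names), then enable the function
 * entry callbacks.
 */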
static void start_function_trace(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        tracing_reset_online_cpus(tr);
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

static int function_trace_init(struct trace_array *tr)
{
        start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        stop_function_trace(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

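/*
 * Entry callback used when TRACE_ITER_PREEMPTONLY is set: it only
 * disables preemption around recording the event instead of disabling
 * interrupts as function_trace_call() does.
 */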
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

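        /*
         * ftrace_preempt_disable() disables preemption and remembers
         * whether a reschedule was already pending, so that
         * ftrace_preempt_enable() below can re-enable preemption
         * without triggering a schedule from inside the tracer.
         */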
        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
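        /*
         * data->disabled is a per-CPU nesting counter: only record the
         * event when this is the sole active user on this CPU, so a
         * recursive entry into the tracer on this CPU is dropped
         * instead of being traced again.
         */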
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

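/*
 * Like function_trace_call(), but also records a stack trace for each
 * traced function (the func_stack_trace option).
 */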
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}


static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};

/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

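/*
 * Pick the entry callback based on TRACE_ITER_PREEMPTONLY, register
 * either the plain or the stack-tracing ops, and only then set
 * ftrace_function_enabled so the callbacks start recording.
 */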
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        /* OK if they are not registered */
        unregister_ftrace_function(&trace_stack_ops);
        unregister_ftrace_function(&trace_ops);
}

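/*
 * Toggling func_stack_trace at runtime swaps which ftrace_ops is
 * registered, so the change takes effect without restarting the
 * tracer.
 */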
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

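/*
 * Registers the tracer as "function": it then appears in
 * available_tracers and is selected by writing that name to
 * current_tracer in the tracing debugfs directory.
 */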
static __init int init_function_trace(void)
{
        return register_tracer(&function_trace);
}

device_initcall(init_function_trace);