tracing/filters: Defer pred allocation, fix memory leak
kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static int                      sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int                      sched_stopped;


void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_context_switch;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(tr, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);

        if (!filter_check_discard(call, entry, tr->buffer, event))
                trace_buffer_unlock_commit(tr, event, flags, pc);
}

/*
 * Tracepoint probe: runs on every context switch. It always records the
 * task comms and, when a tracer has enabled recording, writes a TRACE_CTX
 * entry into the assigned trace array.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

        local_irq_restore(flags);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);
        ftrace_trace_stack(tr, flags, 6, pc);
        ftrace_trace_userstack(tr, flags, pc);
}

/*
 * Tracepoint probe: runs on task wakeup. It records the current task's
 * comm and, when a tracer has enabled recording, writes a TRACE_WAKE
 * entry into the assigned trace array.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(current);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}

static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}

static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}

void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}
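
/*
 * Illustrative sketch, not part of the original file: a tracer that only
 * needs task comms resolved in its output can bracket its lifetime with
 * the cmdline-record helpers above. This registers the sched probes so
 * comms are saved, but writes no context-switch events because
 * tracer_enabled is left untouched. The callback names are hypothetical.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
        tracing_start_cmdline_record();
        return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
        /* Drop our reference; the probes are unregistered when it hits zero. */
        tracing_stop_cmdline_record();
}
#endif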

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}
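
/*
 * Illustrative sketch, not part of the original file: the expected calling
 * pattern for a tracer that wants context-switch events written into its
 * own trace array. The array must be assigned before recording starts,
 * otherwise tracing_start_sched_switch_record() trips its WARN_ON().
 * Function names are hypothetical; sched_switch_trace_init() below
 * follows the same pattern.
 */
#if 0
static int example_ctxsw_tracer_init(struct trace_array *tr)
{
        tracing_sched_switch_assign_trace(tr); /* set ctx_trace first */
        tracing_start_sched_switch_record();   /* bumps tracer_enabled */
        return 0;
}

static void example_ctxsw_tracer_reset(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}
#endif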

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}

static int sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;
        tracing_reset_online_cpus(tr);
        tracing_start_sched_switch_record();
        return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
        sched_stopped = 0;
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
        sched_stopped = 1;
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
        .wait_pipe      = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
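
/*
 * Usage note (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): once registered, this tracer is selected from
 * user space by its .name:
 *
 *      echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *      cat /sys/kernel/debug/tracing/trace
 */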