/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
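/*
 * Usage sketch (an illustration, not part of this file): with
 * CONFIG_BRANCH_TRACER enabled and debugfs mounted at the usual
 * /sys/kernel/debug, the tracer and profile files registered below
 * can be exercised as:
 *
 *	# echo branch > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *	# cat /sys/kernel/debug/tracing/profile_likely
 *	# cat /sys/kernel/debug/tracing/profile_unlikely
 */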
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"

#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;

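/*
 * Record one likely()/unlikely() hit in the ring buffer: the
 * annotated function, the file and line of the annotation, and
 * whether the prediction matched what actually happened.
 */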
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_branch_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type		= TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	raw_local_irq_restore(flags);
}

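/* Fast path: do nothing unless the branch tracer is actually enabled */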
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

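/*
 * Install @tr as the branch tracer's trace_array and bump the
 * enable count so the probe starts firing.
 */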
int enable_branch_tracing(struct trace_array *tr)
{
	int ret = 0;

	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * The assignment of branch_tracer must be visible before
	 * branch_tracing_enabled is incremented. The reader only
	 * checks the flag, so no matching rmb() is needed there.
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return ret;
}

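/* Drop one enable reference; the probe stops once the count hits zero */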
void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

struct tracer branch_trace __read_mostly = {
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};

static __init int init_branch_trace(void)
{
	return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

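/*
 * Entry point called by the instrumented likely()/unlikely() macros
 * on every annotated branch: feed the branch tracer (if enabled)
 * and update the per-site counters shown by the profile files.
 */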
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a tracepoint here instead, but the
	 * tracepoint code is itself so full of likely and unlikely
	 * conditions that the resulting recursion is too much to
	 * get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

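/*
 * Bounds of a linker-built section of ftrace_branch_data records;
 * the seq_file iterator below walks everything in [start, stop).
 */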
struct ftrace_pointer {
	void		*start;
	void		*stop;
};

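/*
 * seq_file iteration over one profile section. The magic cookie
 * (void *)1 stands for the header row; real records follow from
 * f->start.
 */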
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	if (v == (void *)1)
		return f->start;

	++p;

	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = (void *)1;
	loff_t l = 0;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}

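/*
 * Print one row: hit counts, the percentage of incorrect
 * predictions, and the annotation's function, file and line.
 */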
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	unsigned long percent;

	if (v == (void *)1) {
		seq_printf(m, " correct incorrect  %% "
			      "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else {
		percent = p->incorrect ? 100 : 0;
	}

	seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

static const struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

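/* Pass the section bounds stashed in i_private through to the iterator */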
static int tracing_likely_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &tracing_likely_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)inode->i_private;
	}

	return ret;
}

static const struct file_operations tracing_likely_fops = {
	.open		= tracing_likely_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

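/* Section markers provided by the linker script around the profiling data */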
extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];

static struct ftrace_pointer ftrace_likely_pos = {
	.start			= __start_likely_profile,
	.stop			= __stop_likely_profile,
};

static struct ftrace_pointer ftrace_unlikely_pos = {
	.start			= __start_unlikely_profile,
	.stop			= __stop_unlikely_profile,
};

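/*
 * Create the debugfs files that expose the two profiles in the
 * tracing directory.
 */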
static __init int ftrace_branch_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("profile_likely", 0444, d_tracer,
				    &ftrace_likely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'profile_likely' entry\n");

	entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
				    &ftrace_unlikely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'profile_unlikely' entry\n");

	return 0;
}

device_initcall(ftrace_branch_init);