1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/fs.h>
13
14 #include "trace.h"
15 #include "trace_output.h"
16
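/*
 * Per-cpu state used while generating the output: the pid printed on the
 * previous line, the current call depth, whether the next event on this
 * cpu should be ignored, and the entry function recorded at each depth
 * so a return event can be matched against its entry.
 */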
17 struct fgraph_cpu_data {
18         pid_t           last_pid;
19         int             depth;
20         int             ignore;
21         unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
22 };
23
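/*
 * Iterator-private data: the per-cpu state above plus a copy of the last
 * entry/exit pair processed, so an entry whose output did not fit in the
 * seq buffer can be printed again on the next read.
 */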
24 struct fgraph_data {
25         struct fgraph_cpu_data          *cpu_data;
26
27         /* Place to preserve last processed entry. */
28         struct ftrace_graph_ent_entry   ent;
29         struct ftrace_graph_ret_entry   ret;
30         int                             failed;
31         int                             cpu;
32 };
33
34 #define TRACE_GRAPH_INDENT      2
35
36 /* Flag options */
37 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
38 #define TRACE_GRAPH_PRINT_CPU           0x2
39 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
40 #define TRACE_GRAPH_PRINT_PROC          0x8
41 #define TRACE_GRAPH_PRINT_DURATION      0x10
42 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
43
44 static struct tracer_opt trace_opts[] = {
45         /* Display overruns? (for self-debugging purposes) */
46         { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47         /* Display the CPU? */
48         { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49         /* Display the overhead markers? */
50         { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51         /* Display proc name/pid */
52         { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53         /* Display duration of execution */
54         { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55         /* Display absolute time of an entry */
56         { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57         { } /* Empty entry */
58 };
59
60 static struct tracer_flags tracer_flags = {
61         /* Don't display overruns and proc by default */
62         .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
63                TRACE_GRAPH_PRINT_DURATION,
64         .opts = trace_opts
65 };
66
67 static struct trace_array *graph_array;
68
69
70 /* Add a function return address to the trace stack of the current task. */
71 int
72 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
73                          unsigned long frame_pointer)
74 {
75         unsigned long long calltime;
76         int index;
77
78         if (!current->ret_stack)
79                 return -EBUSY;
80
81         /*
82          * We must make sure the ret_stack is tested before we read
83          * anything else.
84          */
85         smp_rmb();
86
87         /* The return trace stack is full */
88         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
89                 atomic_inc(&current->trace_overrun);
90                 return -EBUSY;
91         }
92
93         calltime = trace_clock_local();
94
95         index = ++current->curr_ret_stack;
96         barrier();
97         current->ret_stack[index].ret = ret;
98         current->ret_stack[index].func = func;
99         current->ret_stack[index].calltime = calltime;
100         current->ret_stack[index].subtime = 0;
101         current->ret_stack[index].fp = frame_pointer;
102         *depth = index;
103
104         return 0;
105 }
106
107 /* Retrieve a function return address from the trace stack of the current task. */
108 static void
109 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
110                         unsigned long frame_pointer)
111 {
112         int index;
113
114         index = current->curr_ret_stack;
115
116         if (unlikely(index < 0)) {
117                 ftrace_graph_stop();
118                 WARN_ON(1);
119                 /* Might as well panic, otherwise we have nowhere to go */
120                 *ret = (unsigned long)panic;
121                 return;
122         }
123
124 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
125         /*
126          * The arch may choose to record the frame pointer used
127          * and check it here to make sure that it is what we expect it
128          * to be. If gcc does not set the place holder of the return
129          * address in the frame pointer, and does a copy instead, then
130          * the function graph trace will fail. This test detects this
131          * case.
132          *
133          * Currently, building x86_32 with -Os (optimize for size) makes
134          * the latest gcc behave this way.
135          */
136         if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
137                 ftrace_graph_stop();
138                 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
139                      "  from func %ps return to %lx\n",
140                      current->ret_stack[index].fp,
141                      frame_pointer,
142                      (void *)current->ret_stack[index].func,
143                      current->ret_stack[index].ret);
144                 *ret = (unsigned long)panic;
145                 return;
146         }
147 #endif
148
149         *ret = current->ret_stack[index].ret;
150         trace->func = current->ret_stack[index].func;
151         trace->calltime = current->ret_stack[index].calltime;
152         trace->overrun = atomic_read(&current->trace_overrun);
153         trace->depth = index;
154 }
155
156 /*
157  * Send the trace to the ring-buffer.
158  * @return the original return address.
159  */
160 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
161 {
162         struct ftrace_graph_ret trace;
163         unsigned long ret;
164
165         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
166         trace.rettime = trace_clock_local();
167         ftrace_graph_return(&trace);
168         barrier();
169         current->curr_ret_stack--;
170
171         if (unlikely(!ret)) {
172                 ftrace_graph_stop();
173                 WARN_ON(1);
174                 /* Might as well panic. What else to do? */
175                 ret = (unsigned long)panic;
176         }
177
178         return ret;
179 }
180
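/*
 * Write a function entry event into the ring buffer.
 * Returns 1 if the event was recorded, 0 if tracing is disabled on this
 * cpu or no ring buffer space could be reserved.
 */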
181 static int __trace_graph_entry(struct trace_array *tr,
182                                 struct ftrace_graph_ent *trace,
183                                 unsigned long flags,
184                                 int pc)
185 {
186         struct ftrace_event_call *call = &event_funcgraph_entry;
187         struct ring_buffer_event *event;
188         struct ring_buffer *buffer = tr->buffer;
189         struct ftrace_graph_ent_entry *entry;
190
191         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
192                 return 0;
193
194         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
195                                           sizeof(*entry), flags, pc);
196         if (!event)
197                 return 0;
198         entry   = ring_buffer_event_data(event);
199         entry->graph_ent                        = *trace;
200         if (!filter_current_check_discard(buffer, call, entry, event))
201                 ring_buffer_unlock_commit(buffer, event);
202
203         return 1;
204 }
205
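/*
 * Callback invoked on every traced function entry. Filters out tasks and
 * functions that should not be traced, then records the event with
 * interrupts disabled, unless tracing is already disabled on this cpu.
 */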
206 int trace_graph_entry(struct ftrace_graph_ent *trace)
207 {
208         struct trace_array *tr = graph_array;
209         struct trace_array_cpu *data;
210         unsigned long flags;
211         long disabled;
212         int ret;
213         int cpu;
214         int pc;
215
216         if (!ftrace_trace_task(current))
217                 return 0;
218
219         /* Trace it if it is nested in, or itself is, an enabled function. */
220         if (!(trace->depth || ftrace_graph_addr(trace->func)))
221                 return 0;
222
223         local_irq_save(flags);
224         cpu = raw_smp_processor_id();
225         data = tr->data[cpu];
226         disabled = atomic_inc_return(&data->disabled);
227         if (likely(disabled == 1)) {
228                 pc = preempt_count();
229                 ret = __trace_graph_entry(tr, trace, flags, pc);
230         } else {
231                 ret = 0;
232         }
233
234         atomic_dec(&data->disabled);
235         local_irq_restore(flags);
236
237         return ret;
238 }
239
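/* Write a function return event into the ring buffer. */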
240 static void __trace_graph_return(struct trace_array *tr,
241                                 struct ftrace_graph_ret *trace,
242                                 unsigned long flags,
243                                 int pc)
244 {
245         struct ftrace_event_call *call = &event_funcgraph_exit;
246         struct ring_buffer_event *event;
247         struct ring_buffer *buffer = tr->buffer;
248         struct ftrace_graph_ret_entry *entry;
249
250         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
251                 return;
252
253         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
254                                           sizeof(*entry), flags, pc);
255         if (!event)
256                 return;
257         entry   = ring_buffer_event_data(event);
258         entry->ret                              = *trace;
259         if (!filter_current_check_discard(buffer, call, entry, event))
260                 ring_buffer_unlock_commit(buffer, event);
261 }
262
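/*
 * Callback invoked on every traced function return; mirrors
 * trace_graph_entry() for the exit event.
 */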
263 void trace_graph_return(struct ftrace_graph_ret *trace)
264 {
265         struct trace_array *tr = graph_array;
266         struct trace_array_cpu *data;
267         unsigned long flags;
268         long disabled;
269         int cpu;
270         int pc;
271
272         local_irq_save(flags);
273         cpu = raw_smp_processor_id();
274         data = tr->data[cpu];
275         disabled = atomic_inc_return(&data->disabled);
276         if (likely(disabled == 1)) {
277                 pc = preempt_count();
278                 __trace_graph_return(tr, trace, flags, pc);
279         }
280         atomic_dec(&data->disabled);
281         local_irq_restore(flags);
282 }
283
284 void set_graph_array(struct trace_array *tr)
285 {
286         graph_array = tr;
287
288         /* Make graph_array visible before we start tracing */
289
290         smp_mb();
291 }
292
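/* Register the entry/return callbacks and start recording cmdlines. */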
293 static int graph_trace_init(struct trace_array *tr)
294 {
295         int ret;
296
297         set_graph_array(tr);
298         ret = register_ftrace_graph(&trace_graph_return,
299                                     &trace_graph_entry);
300         if (ret)
301                 return ret;
302         tracing_start_cmdline_record();
303
304         return 0;
305 }
306
307 static void graph_trace_reset(struct trace_array *tr)
308 {
309         tracing_stop_cmdline_record();
310         unregister_ftrace_graph();
311 }
312
313 static int max_bytes_for_cpu;
314
315 static enum print_line_t
316 print_graph_cpu(struct trace_seq *s, int cpu)
317 {
318         int ret;
319
320         /*
321          * Start with a space character - to make it stand out
322          * to the right a bit when trace output is pasted into
323          * email:
324          */
325         ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
326         if (!ret)
327                 return TRACE_TYPE_PARTIAL_LINE;
328
329         return TRACE_TYPE_HANDLED;
330 }
331
332 #define TRACE_GRAPH_PROCINFO_LENGTH     14
333
334 static enum print_line_t
335 print_graph_proc(struct trace_seq *s, pid_t pid)
336 {
337         char comm[TASK_COMM_LEN];
338         /* log10(MAX_INT) digits + '\0' (pids are never negative) */
339         char pid_str[11];
340         int spaces = 0;
341         int ret;
342         int len;
343         int i;
344
345         trace_find_cmdline(pid, comm);
346         comm[7] = '\0';
347         sprintf(pid_str, "%d", pid);
348
349         /* 1 stands for the "-" character */
350         len = strlen(comm) + strlen(pid_str) + 1;
351
352         if (len < TRACE_GRAPH_PROCINFO_LENGTH)
353                 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
354
355         /* Leading spaces to center the text */
356         for (i = 0; i < spaces / 2; i++) {
357                 ret = trace_seq_printf(s, " ");
358                 if (!ret)
359                         return TRACE_TYPE_PARTIAL_LINE;
360         }
361
362         ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
363         if (!ret)
364                 return TRACE_TYPE_PARTIAL_LINE;
365
366         /* Trailing spaces to center the text */
367         for (i = 0; i < spaces - (spaces / 2); i++) {
368                 ret = trace_seq_printf(s, " ");
369                 if (!ret)
370                         return TRACE_TYPE_PARTIAL_LINE;
371         }
372         return TRACE_TYPE_HANDLED;
373 }
374
375
376 static enum print_line_t
377 print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
378 {
379         if (!trace_seq_putc(s, ' '))
380                 return 0;
381
382         return trace_print_lat_fmt(s, entry);
383 }
384
385 /* If the pid changed since the last trace, output this event */
386 static enum print_line_t
387 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
388 {
389         pid_t prev_pid;
390         pid_t *last_pid;
391         int ret;
392
393         if (!data)
394                 return TRACE_TYPE_HANDLED;
395
396         last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
397
398         if (*last_pid == pid)
399                 return TRACE_TYPE_HANDLED;
400
401         prev_pid = *last_pid;
402         *last_pid = pid;
403
404         if (prev_pid == -1)
405                 return TRACE_TYPE_HANDLED;
406 /*
407  * Context-switch trace line:
408
409  ------------------------------------------
410  | 1)  migration/0--1  =>  sshd-1755
411  ------------------------------------------
412
413  */
414         ret = trace_seq_printf(s,
415                 " ------------------------------------------\n");
416         if (!ret)
417                 return TRACE_TYPE_PARTIAL_LINE;
418
419         ret = print_graph_cpu(s, cpu);
420         if (ret == TRACE_TYPE_PARTIAL_LINE)
421                 return TRACE_TYPE_PARTIAL_LINE;
422
423         ret = print_graph_proc(s, prev_pid);
424         if (ret == TRACE_TYPE_PARTIAL_LINE)
425                 return TRACE_TYPE_PARTIAL_LINE;
426
427         ret = trace_seq_printf(s, " => ");
428         if (!ret)
429                 return TRACE_TYPE_PARTIAL_LINE;
430
431         ret = print_graph_proc(s, pid);
432         if (ret == TRACE_TYPE_PARTIAL_LINE)
433                 return TRACE_TYPE_PARTIAL_LINE;
434
435         ret = trace_seq_printf(s,
436                 "\n ------------------------------------------\n\n");
437         if (!ret)
438                 return TRACE_TYPE_PARTIAL_LINE;
439
440         return TRACE_TYPE_HANDLED;
441 }
442
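/*
 * Peek at the next event in the buffer. If it is the return event that
 * matches @curr (same pid and function), @curr is a leaf call: return
 * the exit entry so both can be printed on a single line, and advance
 * the iterator past it. Otherwise return NULL.
 */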
443 static struct ftrace_graph_ret_entry *
444 get_return_for_leaf(struct trace_iterator *iter,
445                 struct ftrace_graph_ent_entry *curr)
446 {
447         struct fgraph_data *data = iter->private;
448         struct ring_buffer_iter *ring_iter = NULL;
449         struct ring_buffer_event *event;
450         struct ftrace_graph_ret_entry *next;
451
452         /*
453          * If the previous output failed to write to the seq buffer,
454          * then we just reuse the data from before.
455          */
456         if (data && data->failed) {
457                 curr = &data->ent;
458                 next = &data->ret;
459         } else {
460
461                 ring_iter = iter->buffer_iter[iter->cpu];
462
463                 /* First peek to compare current entry and the next one */
464                 if (ring_iter)
465                         event = ring_buffer_iter_peek(ring_iter, NULL);
466                 else {
467                         /*
468                          * We need to consume the current entry to see
469                          * the next one.
470                          */
471                         ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
472                         event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
473                                                  NULL);
474                 }
475
476                 if (!event)
477                         return NULL;
478
479                 next = ring_buffer_event_data(event);
480
481                 if (data) {
482                         /*
483                          * Save current and next entries for later reference
484                          * if the output fails.
485                          */
486                         data->ent = *curr;
487                         data->ret = *next;
488                 }
489         }
490
491         if (next->ent.type != TRACE_GRAPH_RET)
492                 return NULL;
493
494         if (curr->ent.pid != next->ent.pid ||
495                         curr->graph_ent.func != next->ret.func)
496                 return NULL;
497
498         /* this is a leaf, now advance the iterator */
499         if (ring_iter)
500                 ring_buffer_read(ring_iter, NULL);
501
502         return next;
503 }
504
505 /* Signal an execution-time overhead marker to the output */
506 static int
507 print_graph_overhead(unsigned long long duration, struct trace_seq *s)
508 {
509         /* If the duration column is disabled, we don't need anything */
510         if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
511                 return 1;
512
513         /* Non-nested entry or return */
514         if (duration == -1)
515                 return trace_seq_printf(s, "  ");
516
517         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
518                 /* Duration exceeded 100 usecs */
519                 if (duration > 100000ULL)
520                         return trace_seq_printf(s, "! ");
521
522                 /* Duration exceeded 10 usecs */
523                 if (duration > 10000ULL)
524                         return trace_seq_printf(s, "+ ");
525         }
526
527         return trace_seq_printf(s, "  ");
528 }
529
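/* Print the absolute timestamp as seconds.microseconds. */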
530 static int print_graph_abs_time(u64 t, struct trace_seq *s)
531 {
532         unsigned long usecs_rem;
533
534         usecs_rem = do_div(t, NSEC_PER_SEC);
535         usecs_rem /= 1000;
536
537         return trace_seq_printf(s, "%5lu.%06lu |  ",
538                         (unsigned long)t, usecs_rem);
539 }
540
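/*
 * Print the "==========>" / "<==========" markers when @addr falls in the
 * irqentry text section, i.e. when an interrupt handler is entered or
 * exited.
 */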
541 static enum print_line_t
542 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
543                 enum trace_type type, int cpu, pid_t pid)
544 {
545         int ret;
546         struct trace_seq *s = &iter->seq;
547
548         if (addr < (unsigned long)__irqentry_text_start ||
549                 addr >= (unsigned long)__irqentry_text_end)
550                 return TRACE_TYPE_UNHANDLED;
551
552         /* Absolute time */
553         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
554                 ret = print_graph_abs_time(iter->ts, s);
555                 if (!ret)
556                         return TRACE_TYPE_PARTIAL_LINE;
557         }
558
559         /* Cpu */
560         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
561                 ret = print_graph_cpu(s, cpu);
562                 if (ret == TRACE_TYPE_PARTIAL_LINE)
563                         return TRACE_TYPE_PARTIAL_LINE;
564         }
565
566         /* Proc */
567         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
568                 ret = print_graph_proc(s, pid);
569                 if (ret == TRACE_TYPE_PARTIAL_LINE)
570                         return TRACE_TYPE_PARTIAL_LINE;
571                 ret = trace_seq_printf(s, " | ");
572                 if (!ret)
573                         return TRACE_TYPE_PARTIAL_LINE;
574         }
575
576         /* No overhead */
577         ret = print_graph_overhead(-1, s);
578         if (!ret)
579                 return TRACE_TYPE_PARTIAL_LINE;
580
581         if (type == TRACE_GRAPH_ENT)
582                 ret = trace_seq_printf(s, "==========>");
583         else
584                 ret = trace_seq_printf(s, "<==========");
585
586         if (!ret)
587                 return TRACE_TYPE_PARTIAL_LINE;
588
589         /* Don't close the duration column if we don't have one */
590         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
591                 trace_seq_printf(s, " |");
592         ret = trace_seq_printf(s, "\n");
593
594         if (!ret)
595                 return TRACE_TYPE_PARTIAL_LINE;
596         return TRACE_TYPE_HANDLED;
597 }
598
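/*
 * Format @duration (in nanoseconds) as "<usecs>.<frac> us", padded to the
 * fixed width of the duration column.
 */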
599 enum print_line_t
600 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
601 {
602         unsigned long nsecs_rem = do_div(duration, 1000);
603         /* log10(ULONG_MAX) + '\0' */
604         char msecs_str[21];
605         char nsecs_str[5];
606         int ret, len;
607         int i;
608
609         sprintf(msecs_str, "%lu", (unsigned long) duration);
610
611         /* Print the whole number of usecs */
612         ret = trace_seq_printf(s, "%s", msecs_str);
613         if (!ret)
614                 return TRACE_TYPE_PARTIAL_LINE;
615
616         len = strlen(msecs_str);
617
618         /* Print the nsecs remainder (we don't want to exceed 7 digits) */
619         if (len < 7) {
620                 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
621                 ret = trace_seq_printf(s, ".%s", nsecs_str);
622                 if (!ret)
623                         return TRACE_TYPE_PARTIAL_LINE;
624                 len += strlen(nsecs_str);
625         }
626
627         ret = trace_seq_printf(s, " us ");
628         if (!ret)
629                 return TRACE_TYPE_PARTIAL_LINE;
630
631         /* Print remaining spaces to fit the row's width */
632         for (i = len; i < 7; i++) {
633                 ret = trace_seq_printf(s, " ");
634                 if (!ret)
635                         return TRACE_TYPE_PARTIAL_LINE;
636         }
637         return TRACE_TYPE_HANDLED;
638 }
639
640 static enum print_line_t
641 print_graph_duration(unsigned long long duration, struct trace_seq *s)
642 {
643         int ret;
644
645         ret = trace_print_graph_duration(duration, s);
646         if (ret != TRACE_TYPE_HANDLED)
647                 return ret;
648
649         ret = trace_seq_printf(s, "|  ");
650         if (!ret)
651                 return TRACE_TYPE_PARTIAL_LINE;
652
653         return TRACE_TYPE_HANDLED;
654 }
655
656 /* Case of a leaf function on its call entry */
657 static enum print_line_t
658 print_graph_entry_leaf(struct trace_iterator *iter,
659                 struct ftrace_graph_ent_entry *entry,
660                 struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
661 {
662         struct fgraph_data *data = iter->private;
663         struct ftrace_graph_ret *graph_ret;
664         struct ftrace_graph_ent *call;
665         unsigned long long duration;
666         int ret;
667         int i;
668
669         graph_ret = &ret_entry->ret;
670         call = &entry->graph_ent;
671         duration = graph_ret->rettime - graph_ret->calltime;
672
673         if (data) {
674                 struct fgraph_cpu_data *cpu_data;
675                 int cpu = iter->cpu;
676
677                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
678
679                 /*
680                  * Comments are displayed at depth + 1. Since
681                  * this is a leaf function, keep the comments
682                  * at the same depth as this call.
683                  */
684                 cpu_data->depth = call->depth - 1;
685
686                 /* No need to keep this function around for this depth */
687                 if (call->depth < FTRACE_RETFUNC_DEPTH)
688                         cpu_data->enter_funcs[call->depth] = 0;
689         }
690
691         /* Overhead */
692         ret = print_graph_overhead(duration, s);
693         if (!ret)
694                 return TRACE_TYPE_PARTIAL_LINE;
695
696         /* Duration */
697         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
698                 ret = print_graph_duration(duration, s);
699                 if (ret == TRACE_TYPE_PARTIAL_LINE)
700                         return TRACE_TYPE_PARTIAL_LINE;
701         }
702
703         /* Function */
704         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
705                 ret = trace_seq_printf(s, " ");
706                 if (!ret)
707                         return TRACE_TYPE_PARTIAL_LINE;
708         }
709
710         ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
711         if (!ret)
712                 return TRACE_TYPE_PARTIAL_LINE;
713
714         return TRACE_TYPE_HANDLED;
715 }
716
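/* Case of a non-leaf function: print "func() {" and record its depth. */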
717 static enum print_line_t
718 print_graph_entry_nested(struct trace_iterator *iter,
719                          struct ftrace_graph_ent_entry *entry,
720                          struct trace_seq *s, int cpu)
721 {
722         struct ftrace_graph_ent *call = &entry->graph_ent;
723         struct fgraph_data *data = iter->private;
724         int ret;
725         int i;
726
727         if (data) {
728                 struct fgraph_cpu_data *cpu_data;
729                 int cpu = iter->cpu;
730
731                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
732                 cpu_data->depth = call->depth;
733
734                 /* Save this function pointer to see if the exit matches */
735                 if (call->depth < FTRACE_RETFUNC_DEPTH)
736                         cpu_data->enter_funcs[call->depth] = call->func;
737         }
738
739         /* No overhead */
740         ret = print_graph_overhead(-1, s);
741         if (!ret)
742                 return TRACE_TYPE_PARTIAL_LINE;
743
744         /* No time */
745         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
746                 ret = trace_seq_printf(s, "            |  ");
747                 if (!ret)
748                         return TRACE_TYPE_PARTIAL_LINE;
749         }
750
751         /* Function */
752         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
753                 ret = trace_seq_printf(s, " ");
754                 if (!ret)
755                         return TRACE_TYPE_PARTIAL_LINE;
756         }
757
758         ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
759         if (!ret)
760                 return TRACE_TYPE_PARTIAL_LINE;
761
762         /*
763          * we already consumed the current entry to check the next one
764          * and see if this is a leaf.
765          */
766         return TRACE_TYPE_NO_CONSUME;
767 }
768
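/*
 * Print everything that precedes the function name on a line: the
 * context switch banner if the pid changed, the irq markers, then the
 * optional absolute time, cpu, proc and latency columns.
 */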
769 static enum print_line_t
770 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
771                      int type, unsigned long addr)
772 {
773         struct fgraph_data *data = iter->private;
774         struct trace_entry *ent = iter->ent;
775         int cpu = iter->cpu;
776         int ret;
777
778         /* Pid */
779         if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
780                 return TRACE_TYPE_PARTIAL_LINE;
781
782         if (type) {
783                 /* Interrupt */
784                 ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
785                 if (ret == TRACE_TYPE_PARTIAL_LINE)
786                         return TRACE_TYPE_PARTIAL_LINE;
787         }
788
789         /* Absolute time */
790         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
791                 ret = print_graph_abs_time(iter->ts, s);
792                 if (!ret)
793                         return TRACE_TYPE_PARTIAL_LINE;
794         }
795
796         /* Cpu */
797         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
798                 ret = print_graph_cpu(s, cpu);
799                 if (ret == TRACE_TYPE_PARTIAL_LINE)
800                         return TRACE_TYPE_PARTIAL_LINE;
801         }
802
803         /* Proc */
804         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
805                 ret = print_graph_proc(s, ent->pid);
806                 if (ret == TRACE_TYPE_PARTIAL_LINE)
807                         return TRACE_TYPE_PARTIAL_LINE;
808
809                 ret = trace_seq_printf(s, " | ");
810                 if (!ret)
811                         return TRACE_TYPE_PARTIAL_LINE;
812         }
813
814         /* Latency format */
815         if (trace_flags & TRACE_ITER_LATENCY_FMT) {
816                 ret = print_graph_lat_fmt(s, ent);
817                 if (ret == TRACE_TYPE_PARTIAL_LINE)
818                         return TRACE_TYPE_PARTIAL_LINE;
819         }
820
821         return 0;
822 }
823
824 static enum print_line_t
825 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
826                         struct trace_iterator *iter)
827 {
828         struct fgraph_data *data = iter->private;
829         struct ftrace_graph_ent *call = &field->graph_ent;
830         struct ftrace_graph_ret_entry *leaf_ret;
831         enum print_line_t ret;
832         int cpu = iter->cpu;
833
834         if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
835                 return TRACE_TYPE_PARTIAL_LINE;
836
837         leaf_ret = get_return_for_leaf(iter, field);
838         if (leaf_ret)
839                 ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
840         else
841                 ret = print_graph_entry_nested(iter, field, s, cpu);
842
843         if (data) {
844                 /*
845                  * If we failed to write our output, then we need to make
846                  * a note of it, because we already consumed our entry.
847                  */
848                 if (s->full) {
849                         data->failed = 1;
850                         data->cpu = cpu;
851                 } else
852                         data->failed = 0;
853         }
854
855         return ret;
856 }
857
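/*
 * Print the closing "}" of a function together with its duration. If the
 * matching entry event was lost, the function name is appended so the
 * user does not have to guess which function is returning.
 */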
858 static enum print_line_t
859 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
860                    struct trace_entry *ent, struct trace_iterator *iter)
861 {
862         unsigned long long duration = trace->rettime - trace->calltime;
863         struct fgraph_data *data = iter->private;
864         pid_t pid = ent->pid;
865         int cpu = iter->cpu;
866         int func_match = 1;
867         int ret;
868         int i;
869
870         if (data) {
871                 struct fgraph_cpu_data *cpu_data;
872                 int cpu = iter->cpu;
873
874                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
875
876                 /*
877                  * Comments are displayed at depth + 1. This is the
878                  * return from a function, so we now want the comments
879                  * to display at the same level as the bracket.
880                  */
881                 cpu_data->depth = trace->depth - 1;
882
883                 if (trace->depth < FTRACE_RETFUNC_DEPTH) {
884                         if (cpu_data->enter_funcs[trace->depth] != trace->func)
885                                 func_match = 0;
886                         cpu_data->enter_funcs[trace->depth] = 0;
887                 }
888         }
889
890         if (print_graph_prologue(iter, s, 0, 0))
891                 return TRACE_TYPE_PARTIAL_LINE;
892
893         /* Overhead */
894         ret = print_graph_overhead(duration, s);
895         if (!ret)
896                 return TRACE_TYPE_PARTIAL_LINE;
897
898         /* Duration */
899         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
900                 ret = print_graph_duration(duration, s);
901                 if (ret == TRACE_TYPE_PARTIAL_LINE)
902                         return TRACE_TYPE_PARTIAL_LINE;
903         }
904
905         /* Closing brace */
906         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
907                 ret = trace_seq_printf(s, " ");
908                 if (!ret)
909                         return TRACE_TYPE_PARTIAL_LINE;
910         }
911
912         /*
913          * If the return function does not have a matching entry,
914          * then the entry was lost. Instead of just printing
915          * the '}' and letting the user guess what function this
916          * belongs to, write out the function name.
917          */
918         if (func_match) {
919                 ret = trace_seq_printf(s, "}\n");
920                 if (!ret)
921                         return TRACE_TYPE_PARTIAL_LINE;
922         } else {
923                 ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
924                 if (!ret)
925                         return TRACE_TYPE_PARTIAL_LINE;
926         }
927
928         /* Overrun */
929         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
930                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
931                                         trace->overrun);
932                 if (!ret)
933                         return TRACE_TYPE_PARTIAL_LINE;
934         }
935
936         ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
937         if (ret == TRACE_TYPE_PARTIAL_LINE)
938                 return TRACE_TYPE_PARTIAL_LINE;
939
940         return TRACE_TYPE_HANDLED;
941 }
942
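/*
 * Events that are neither graph entries nor returns (trace_printk(),
 * events from other tracers, ...) are printed as C comments nested at
 * the current depth.
 */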
943 static enum print_line_t
944 print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
945                     struct trace_iterator *iter)
946 {
947         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
948         struct fgraph_data *data = iter->private;
949         struct trace_event *event;
950         int depth = 0;
951         int ret;
952         int i;
953
954         if (data)
955                 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
956
957         if (print_graph_prologue(iter, s, 0, 0))
958                 return TRACE_TYPE_PARTIAL_LINE;
959
960         /* No overhead */
961         ret = print_graph_overhead(-1, s);
962         if (!ret)
963                 return TRACE_TYPE_PARTIAL_LINE;
964
965         /* No time */
966         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
967                 ret = trace_seq_printf(s, "            |  ");
968                 if (!ret)
969                         return TRACE_TYPE_PARTIAL_LINE;
970         }
971
972         /* Indentation */
973         if (depth > 0)
974                 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
975                         ret = trace_seq_printf(s, " ");
976                         if (!ret)
977                                 return TRACE_TYPE_PARTIAL_LINE;
978                 }
979
980         /* The comment */
981         ret = trace_seq_printf(s, "/* ");
982         if (!ret)
983                 return TRACE_TYPE_PARTIAL_LINE;
984
985         switch (iter->ent->type) {
986         case TRACE_BPRINT:
987                 ret = trace_print_bprintk_msg_only(iter);
988                 if (ret != TRACE_TYPE_HANDLED)
989                         return ret;
990                 break;
991         case TRACE_PRINT:
992                 ret = trace_print_printk_msg_only(iter);
993                 if (ret != TRACE_TYPE_HANDLED)
994                         return ret;
995                 break;
996         default:
997                 event = ftrace_find_event(ent->type);
998                 if (!event)
999                         return TRACE_TYPE_UNHANDLED;
1000
1001                 ret = event->trace(iter, sym_flags);
1002                 if (ret != TRACE_TYPE_HANDLED)
1003                         return ret;
1004         }
1005
1006         /* Strip ending newline */
1007         if (s->buffer[s->len - 1] == '\n') {
1008                 s->buffer[s->len - 1] = '\0';
1009                 s->len--;
1010         }
1011
1012         ret = trace_seq_printf(s, " */\n");
1013         if (!ret)
1014                 return TRACE_TYPE_PARTIAL_LINE;
1015
1016         return TRACE_TYPE_HANDLED;
1017 }
1018
1019
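/*
 * Main output method of the tracer: dispatch each event to the entry,
 * return or comment printer, replaying a previously failed entry first
 * when needed.
 */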
1020 enum print_line_t
1021 print_graph_function(struct trace_iterator *iter)
1022 {
1023         struct ftrace_graph_ent_entry *field;
1024         struct fgraph_data *data = iter->private;
1025         struct trace_entry *entry = iter->ent;
1026         struct trace_seq *s = &iter->seq;
1027         int cpu = iter->cpu;
1028         int ret;
1029
1030         if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1031                 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1032                 return TRACE_TYPE_HANDLED;
1033         }
1034
1035         /*
1036          * If the last output failed, there's a possibility we need
1037          * to print out the missing entry, which would otherwise never go out.
1038          */
1039         if (data && data->failed) {
1040                 field = &data->ent;
1041                 iter->cpu = data->cpu;
1042                 ret = print_graph_entry(field, s, iter);
1043                 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1044                         per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1045                         ret = TRACE_TYPE_NO_CONSUME;
1046                 }
1047                 iter->cpu = cpu;
1048                 return ret;
1049         }
1050
1051         switch (entry->type) {
1052         case TRACE_GRAPH_ENT: {
1053                 /*
1054                  * print_graph_entry() may consume the current event,
1055                  * thus @field may become invalid, so we need to save it.
1056                  * sizeof(struct ftrace_graph_ent_entry) is very small,
1057                  * so it can safely be saved on the stack.
1058                  */
1059                 struct ftrace_graph_ent_entry saved;
1060                 trace_assign_type(field, entry);
1061                 saved = *field;
1062                 return print_graph_entry(&saved, s, iter);
1063         }
1064         case TRACE_GRAPH_RET: {
1065                 struct ftrace_graph_ret_entry *field;
1066                 trace_assign_type(field, entry);
1067                 return print_graph_return(&field->ret, s, entry, iter);
1068         }
1069         default:
1070                 return print_graph_comment(s, entry, iter);
1071         }
1072
1073         return TRACE_TYPE_HANDLED;
1074 }
1075
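/*
 * Print the legend for the latency-format columns, shifted right by the
 * width of the optional time, cpu and proc columns.
 */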
1076 static void print_lat_header(struct seq_file *s)
1077 {
1078         static const char spaces[] = "                " /* 16 spaces */
1079                 "    "                                  /* 4 spaces */
1080                 "                 ";                    /* 17 spaces */
1081         int size = 0;
1082
1083         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
1084                 size += 16;
1085         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
1086                 size += 4;
1087         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
1088                 size += 17;
1089
1090         seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1091         seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1092         seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1093         seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1094         seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
1095         seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
1096 }
1097
1098 static void print_graph_headers(struct seq_file *s)
1099 {
1100         int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1101
1102         if (lat)
1103                 print_lat_header(s);
1104
1105         /* 1st line */
1106         seq_printf(s, "#");
1107         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
1108                 seq_printf(s, "     TIME       ");
1109         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
1110                 seq_printf(s, " CPU");
1111         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
1112                 seq_printf(s, "  TASK/PID       ");
1113         if (lat)
1114                 seq_printf(s, "|||||");
1115         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
1116                 seq_printf(s, "  DURATION   ");
1117         seq_printf(s, "               FUNCTION CALLS\n");
1118
1119         /* 2nd line */
1120         seq_printf(s, "#");
1121         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
1122                 seq_printf(s, "      |         ");
1123         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
1124                 seq_printf(s, " |  ");
1125         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
1126                 seq_printf(s, "   |    |        ");
1127         if (lat)
1128                 seq_printf(s, "|||||");
1129         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
1130                 seq_printf(s, "   |   |      ");
1131         seq_printf(s, "               |   |   |   |\n");
1132 }
1133
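/*
 * Allocate and reset the per-cpu iterator state; last_pid starts at -1 so
 * no context switch banner is printed for the very first event of a cpu.
 */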
1134 static void graph_trace_open(struct trace_iterator *iter)
1135 {
1136         /* pid and depth of the last trace processed */
1137         struct fgraph_data *data;
1138         int cpu;
1139
1140         iter->private = NULL;
1141
1142         data = kzalloc(sizeof(*data), GFP_KERNEL);
1143         if (!data)
1144                 goto out_err;
1145
1146         data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1147         if (!data->cpu_data)
1148                 goto out_err_free;
1149
1150         for_each_possible_cpu(cpu) {
1151                 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1152                 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1153                 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1154                 *pid = -1;
1155                 *depth = 0;
1156                 *ignore = 0;
1157         }
1158
1159         iter->private = data;
1160
1161         return;
1162
1163  out_err_free:
1164         kfree(data);
1165  out_err:
1166         pr_warning("function graph tracer: not enough memory\n");
1167 }
1168
1169 static void graph_trace_close(struct trace_iterator *iter)
1170 {
1171         struct fgraph_data *data = iter->private;
1172
1173         if (data) {
1174                 free_percpu(data->cpu_data);
1175                 kfree(data);
1176         }
1177 }
1178
1179 static struct tracer graph_trace __read_mostly = {
1180         .name           = "function_graph",
1181         .open           = graph_trace_open,
1182         .pipe_open      = graph_trace_open,
1183         .close          = graph_trace_close,
1184         .pipe_close     = graph_trace_close,
1185         .wait_pipe      = poll_wait_pipe,
1186         .init           = graph_trace_init,
1187         .reset          = graph_trace_reset,
1188         .print_line     = print_graph_function,
1189         .print_header   = print_graph_headers,
1190         .flags          = &tracer_flags,
1191 #ifdef CONFIG_FTRACE_SELFTEST
1192         .selftest       = trace_selftest_startup_function_graph,
1193 #endif
1194 };
1195
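/*
 * Compute the width needed to print the largest cpu number, so the CPU
 * column stays aligned, then register the tracer.
 */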
1196 static __init int init_graph_trace(void)
1197 {
1198         max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1199
1200         return register_tracer(&graph_trace);
1201 }
1202
1203 device_initcall(init_graph_trace);