kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
        pid_t           last_pid;
        int             depth;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debugging) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc or absolute time by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};
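
/*
 * Usage sketch (an illustration, assuming debugfs is mounted at
 * /sys/kernel/debug): the options above can be toggled at runtime
 * through the trace_options file, e.g.
 *
 *      echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *
 * to display the task name/pid on each line, or "nofuncgraph-cpu"
 * to hide the CPU column.
 */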

static struct trace_array *graph_array;

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}
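
/*
 * Illustrative caller (a sketch, not code in this file): an arch's graph
 * entry hook, e.g. prepare_ftrace_return() on x86, is expected to hijack
 * the traced function's return address and push the real one here,
 * roughly:
 *
 *      struct ftrace_graph_ent trace;
 *
 *      if (ftrace_push_return_trace(old, self_addr, &trace.depth,
 *                                   frame_pointer) == -EBUSY)
 *              return;                    // stack missing or full
 *      trace.func = self_addr;
 *      if (!ftrace_graph_entry(&trace))
 *              current->curr_ret_stack--; // entry filtered, undo the push
 *      else
 *              *parent = (unsigned long)&return_to_handler;
 *
 * The matching pop happens in ftrace_return_to_handler() below.
 */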

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %pF return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
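
/*
 * How this is reached (a sketch based on the arch hooks, not code in this
 * file): the entry hook rewrote the traced function's return address to
 * point at an arch trampoline (e.g. return_to_handler on x86). That
 * trampoline saves the live return registers, calls
 * ftrace_return_to_handler(frame_pointer) to log the exit event, and then
 * jumps to the original return address this function hands back.
 */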

static int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);

        return 1;
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (unlikely(!tr))
                return 0;

        if (!ftrace_trace_task(current))
                return 0;

        if (!ftrace_graph_addr(trace->func))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
                set_tsk_trace_graph(current);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        if (!trace->depth)
                clear_tsk_trace_graph(current);
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        graph_array = tr;
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
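
/*
 * Example of the resulting field (an illustration, not output captured
 * from this code): with TRACE_GRAPH_PROCINFO_LENGTH == 14, a task "sshd"
 * with pid 1755 is centered as "  sshd-1755   ", the comm truncated to at
 * most 7 characters by the comm[7] = '\0' above.
 */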

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        int hardirq, softirq;

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

        if (!trace_seq_printf(s, " %c%c%c",
                              (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                                (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
                                  'X' : '.',
                              (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
                                'N' : '.',
                              (hardirq && softirq) ? 'H' :
                                hardirq ? 'h' : softirq ? 's' : '.'))
                return 0;

        if (entry->preempt_count)
                return trace_seq_printf(s, "%x", entry->preempt_count);
        return trace_seq_puts(s, ".");
}
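
/*
 * Example latency column (an illustration): "dNh1" means IRQs are off, a
 * reschedule is needed, we are in hardirq context, and the preempt depth
 * is 1; a '.' in any position means the corresponding state is not set.
 */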

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
        /*
         * Context-switch trace line:
         *
         *  ------------------------------------------
         *  | 1)  migration/0--1  =>  sshd-1755
         *  ------------------------------------------
         */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
                event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
                /* We need to consume the current entry to see the next one */
                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                        NULL);
        }

        if (!event)
                return NULL;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
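
/*
 * Effect on the output (illustrative): when an entry event is directly
 * followed by its own return event, the pair is collapsed into a single
 * "func();" line carrying its duration; otherwise the entry opens a
 * "func() {" block that is closed later by print_graph_return().
 */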

/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If the duration column is disabled, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* No duration available (nested entry, comment or irq marker) */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}
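
/*
 * Illustrative output (not captured from a live trace): since the duration
 * passed in is in nanoseconds, a leaf that took 15.3 us is flagged as
 * "+ 15.342 us" and one over 100 us as "! 123.456 us", matching the
 * "+"/"!" markers documented for ftrace.
 */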

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                        (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
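
/*
 * Worked example (an illustration): a duration of 3251497 ns becomes
 * duration = 3251 with nsecs_rem = 497 after the do_div() above, and is
 * printed as "3251.497 us", filling the 7-character duration column.
 */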

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments are displayed at depth + 1. Since this
                 * is a leaf function, keep the comments at the same
                 * depth as this entry.
                 */
                *depth = call->depth - 1;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                *depth = call->depth;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter)
{
        int cpu = iter->cpu;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                return print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                return print_graph_entry_nested(iter, field, s, cpu);
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments are displayed at depth + 1. This is the
                 * return from a function, so we now want the comments
                 * to display at the same level as the closing bracket.
                 */
                *depth = trace->depth - 1;
        }

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->trace(iter, sym_flags);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can safely be saved on the stack.
                 */
                struct ftrace_graph_ent_entry *field, saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        default:
                return print_graph_comment(s, entry, iter);
        }

        return TRACE_TYPE_HANDLED;
}

static void print_lat_header(struct seq_file *s)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
        seq_printf(s, "#%.*s||||                       \n", size, spaces);
}

static void print_graph_headers(struct seq_file *s)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s);

        /* 1st line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}
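
/*
 * With the default flags, the two header lines look roughly like
 * (illustrative, exact spacing depends on the enabled columns):
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */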

static void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data = alloc_percpu(struct fgraph_data);
        int cpu;

        if (!data)
                pr_warning("function graph tracer: not enough memory\n");
        else
                for_each_possible_cpu(cpu) {
                        pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
                        int *depth = &(per_cpu_ptr(data, cpu)->depth);
                        *pid = -1;
                        *depth = 0;
                }

        iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
        free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .close          = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
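
/*
 * End-to-end usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *      cat /sys/kernel/debug/tracing/trace
 *
 * which yields output along the lines of (illustrative):
 *
 *      0)               |  sys_open() {
 *      0)               |    do_sys_open() {
 *      0)   2.188 us    |      getname();
 *      0) + 23.141 us   |    }
 *      0) + 25.327 us   |  }
 */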