/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
        pid_t           last_pid;
        int             depth;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};
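
/*
 * Note: these per-tracer options can be toggled at runtime through the
 * trace_options file once the tracer is active.  Illustrative shell
 * session (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-cpu > /sys/kernel/debug/tracing/trace_options
 */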

static struct trace_array *graph_array;


/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}
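
/*
 * Illustrative sketch, not the actual arch code: an architecture's
 * entry hook (see e.g. prepare_ftrace_return() on x86) records the
 * real return address with ftrace_push_return_trace() and redirects
 * the traced function's return into the return_to_handler trampoline.
 * Simplified, with 'old', 'parent', 'self_addr', 'frame_pointer' and
 * 'trace' being locals of that hook:
 *
 *	old = *parent;
 *	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
 *				     frame_pointer) == -EBUSY)
 *		return;
 *	*parent = (unsigned long)&return_to_handler;
 */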

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the placeholder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with -Os (optimize for size) makes the
         * latest gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %pF return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else can we do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
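
/*
 * Note: ftrace_return_to_handler() is not called directly from C.  The
 * arch's return_to_handler assembly trampoline saves the traced
 * function's return values, calls this handler to obtain the original
 * return address, and then jumps back to it.
 */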

static int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return 0;

        event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!filter_current_check_discard(call, entry, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        return 1;
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (unlikely(!tr))
                return 0;

        if (!ftrace_trace_task(current))
                return 0;

        if (!ftrace_graph_addr(trace->func))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
                set_tsk_trace_graph(current);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!filter_current_check_discard(call, entry, event))
                ring_buffer_unlock_commit(tr->buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        if (!trace->depth)
                clear_tsk_trace_graph(current);
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        graph_array = tr;
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
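
/*
 * Example of the resulting column (illustrative): with comm "bash" and
 * pid 4251, len = 4 + 4 + 1 = 9, so the 5 padding spaces are split
 * around the text to roughly center it in the 14-character field:
 *
 *   "  bash-4251   "
 */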


/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
                event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
        /* We need to consume the current entry to see the next one */
                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                        NULL);
        }

        if (!event)
                return NULL;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* This is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
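
/*
 * Merging an entry with the return event that immediately follows it
 * lets a leaf call be printed on a single line.  Illustrative output
 * (durations made up) with and without a matching return event:
 *
 *   1)   0.535 us    |        rcu_irq_exit();
 *   1)               |        do_softirq() {
 */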

/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If the duration column is disabled, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                        (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
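
/*
 * Worked example (illustrative): a duration of 3989 ns gives
 * duration = 3 and nsecs_rem = 989 after the do_div() above, so the
 * emitted field is "3.989 us " padded out to the 7-character column.
 */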

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                *depth = call->depth - 1;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                *depth = call->depth;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter)
{
        int cpu = iter->cpu;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                return print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                return print_graph_entry_nested(iter, field, s, cpu);

}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                *depth = trace->depth - 1;
        }

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->trace(iter, sym_flags);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event
                 * (via get_return_for_leaf()), thus @field may become
                 * invalid, so we need to save it. sizeof(struct
                 * ftrace_graph_ent_entry) is very small, so it can
                 * safely be saved on the stack.
                 */
                struct ftrace_graph_ent_entry *field, saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        default:
                return print_graph_comment(s, entry, iter);
        }

        return TRACE_TYPE_HANDLED;
}

static void print_graph_headers(struct seq_file *s)
{
        /* 1st line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID      ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "|  ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  |    |        ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}
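
/*
 * With the default flags, the trace file looks roughly like this
 * (illustrative sample, durations made up):
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 *  0)               |  sys_open() {
 *  0)               |    do_sys_open() {
 *  0)   1.899 us    |      getname();
 *  0) + 12.152 us   |    }
 *  0) ! 115.213 us  |  }
 */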

static void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data = alloc_percpu(struct fgraph_data);
        int cpu;

        if (!data)
                pr_warning("function graph tracer: not enough memory\n");
        else
                for_each_possible_cpu(cpu) {
                        pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
                        int *depth = &(per_cpu_ptr(data, cpu)->depth);
                        *pid = -1;
                        *depth = 0;
                }

        iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
        free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .close          = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
        /* Width needed to print the largest possible CPU number */
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
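
/*
 * Typical usage from userspace (illustrative shell session, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */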