trace_syscalls: Remove enter_id exit_id
[safe/jmp/linux-2.6] / kernel / trace / trace_output.c
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
/* must be a power of 2 (bucket index is computed with "type & (size - 1)") */
#define EVENT_HASHSIZE	128

/* protects event_hash, ftrace_event_list and next_event_type */
DECLARE_RWSEM(trace_event_mutex);

/* per-CPU scratch trace_seq used while formatting event output */
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

/* hash of registered trace_events, keyed by event type */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* next dynamically assigned event type (above the static trace.h types) */
static int next_event_type = __TRACE_LAST_TYPE + 1;
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 {
28         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
29
30         seq_write(m, s->buffer, len);
31
32         trace_seq_init(s);
33 }
34
35 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
36 {
37         struct trace_seq *s = &iter->seq;
38         struct trace_entry *entry = iter->ent;
39         struct bprint_entry *field;
40         int ret;
41
42         trace_assign_type(field, entry);
43
44         ret = trace_seq_bprintf(s, field->fmt, field->buf);
45         if (!ret)
46                 return TRACE_TYPE_PARTIAL_LINE;
47
48         return TRACE_TYPE_HANDLED;
49 }
50
51 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
52 {
53         struct trace_seq *s = &iter->seq;
54         struct trace_entry *entry = iter->ent;
55         struct print_entry *field;
56         int ret;
57
58         trace_assign_type(field, entry);
59
60         ret = trace_seq_printf(s, "%s", field->buf);
61         if (!ret)
62                 return TRACE_TYPE_PARTIAL_LINE;
63
64         return TRACE_TYPE_HANDLED;
65 }
66
67 /**
68  * trace_seq_printf - sequence printing of trace information
69  * @s: trace sequence descriptor
70  * @fmt: printf format string
71  *
72  * It returns 0 if the trace oversizes the buffer's free
73  * space, 1 otherwise.
74  *
75  * The tracer may use either sequence operations or its own
76  * copy to user routines. To simplify formating of a trace
77  * trace_seq_printf is used to store strings into a special
78  * buffer (@s). Then the output may be either used by
79  * the sequencer or pulled into another buffer.
80  */
81 int
82 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
83 {
84         int len = (PAGE_SIZE - 1) - s->len;
85         va_list ap;
86         int ret;
87
88         if (!len)
89                 return 0;
90
91         va_start(ap, fmt);
92         ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
93         va_end(ap);
94
95         /* If we can't write it all, don't bother writing anything */
96         if (ret >= len)
97                 return 0;
98
99         s->len += ret;
100
101         return 1;
102 }
103 EXPORT_SYMBOL_GPL(trace_seq_printf);
104
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 * @args: va_list holding the arguments for @fmt
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns 0 when the output does not fit (nothing is written in that
 * case), otherwise a non-zero value.  NOTE(review): unlike
 * trace_seq_printf() this returns the pre-write free space rather
 * than 1 -- callers should only test for zero/non-zero.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);
136
/*
 * Like trace_seq_printf() but for the binary-printk path: @fmt was
 * recorded earlier and @binary holds the pre-packed argument words.
 * Returns 0 when the output would not fit (nothing is written then),
 * non-zero on success.
 */
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
155
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 *
 * Returns the number of bytes copied, or 0 when @str does not fit.
 * Note an empty @str also returns 0, indistinguishable from failure.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}
178
179 int trace_seq_putc(struct trace_seq *s, unsigned char c)
180 {
181         if (s->len >= (PAGE_SIZE - 1))
182                 return 0;
183
184         s->buffer[s->len++] = c;
185
186         return 1;
187 }
188
189 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
190 {
191         if (len > ((PAGE_SIZE - 1) - s->len))
192                 return 0;
193
194         memcpy(s->buffer + s->len, mem, len);
195         s->len += len;
196
197         return len;
198 }
199
200 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
201 {
202         unsigned char hex[HEX_CHARS];
203         const unsigned char *data = mem;
204         int i, j;
205
206 #ifdef __BIG_ENDIAN
207         for (i = 0, j = 0; i < len; i++) {
208 #else
209         for (i = len-1, j = 0; i >= 0; i--) {
210 #endif
211                 hex[j++] = hex_asc_hi(data[i]);
212                 hex[j++] = hex_asc_lo(data[i]);
213         }
214         hex[j++] = ' ';
215
216         return trace_seq_putmem(s, hex, j);
217 }
218
219 void *trace_seq_reserve(struct trace_seq *s, size_t len)
220 {
221         void *ret;
222
223         if (len > ((PAGE_SIZE - 1) - s->len))
224                 return NULL;
225
226         ret = s->buffer + s->len;
227         s->len += len;
228
229         return ret;
230 }
231
/*
 * Append the pathname of @path to @s, with newline characters mangled
 * so the one-line trace format stays intact.  On d_path() failure a
 * '?' is emitted instead.  Returns 1 on success, 0 if nothing fit.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path() builds the name from the end of the buffer backwards */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
252
/*
 * Render @flags as a @delim-separated list of names from @flag_array,
 * writing into the scratch seq @p.  Bits without a name are printed as
 * a trailing hex value.  The result is NUL-terminated in place and a
 * pointer to its start (inside @p's buffer) is returned.
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0;  flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		/* clear the bits this name accounted for */
		flags &= ~mask;
		/*
		 * NOTE(review): the delimiter test uses p->len, which
		 * assumes this is the first output in @p -- verify callers.
		 */
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	/* terminate so the result can be used as a C string */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
288
289 const char *
290 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
291                          const struct trace_print_flags *symbol_array)
292 {
293         int i;
294         const char *ret = p->buffer + p->len;
295
296         for (i = 0;  symbol_array[i].name; i++) {
297
298                 if (val != symbol_array[i].mask)
299                         continue;
300
301                 trace_seq_puts(p, symbol_array[i].name);
302                 break;
303         }
304
305         if (!p->len)
306                 trace_seq_printf(p, "0x%lx", val);
307                 
308         trace_seq_putc(p, 0);
309
310         return ret;
311 }
312 EXPORT_SYMBOL(ftrace_print_symbols_seq);
313
#ifdef CONFIG_KRETPROBES
/*
 * A kretprobe rewrites the return address to its trampoline, so stack
 * entries may resolve to "kretprobe_trampoline" instead of the real
 * caller; substitute a clearer marker in that case.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	/* size includes the NUL, so this is an exact-match compare */
	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
330
/*
 * Print the symbol name for @address (no offset) into @s using @fmt.
 * Without CONFIG_KALLSYMS nothing is printed and success is returned.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

/*
 * Like seq_print_sym_short() but prints the symbol+offset/size form
 * produced by sprint_symbol().
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
362
/* fixed-width hex format for addresses, matching the word size */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

/*
 * Print a userspace address, resolved to "file[+offset]" when the vma
 * mapping @ip can be found in @mm.  The raw address is printed instead
 * of (or, with TRACE_ITER_SYM_ADDR, in addition to) the file form.
 * Returns 0 when the seq overflowed.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	/* no mapping found, or the raw address was explicitly requested */
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
397
/*
 * Print every caller address recorded in a userstack entry, one
 * " => addr\n" line each.  With TRACE_ITER_SYM_USEROBJ the addresses
 * are resolved against the mm of the traced task's thread group
 * leader.  Returns 0 when the seq overflowed part-way through.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX marks the end of the recorded stack */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			/* a zero entry could not be resolved at save time */
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);	/* drop the reference taken by get_task_mm() */
	return ret;
}
444
445 int
446 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
447 {
448         int ret;
449
450         if (!ip)
451                 return trace_seq_printf(s, "0");
452
453         if (sym_flags & TRACE_ITER_SYM_OFFSET)
454                 ret = seq_print_sym_offset(s, "%s", ip);
455         else
456                 ret = seq_print_sym_short(s, "%s", ip);
457
458         if (!ret)
459                 return 0;
460
461         if (sym_flags & TRACE_ITER_SYM_ADDR)
462                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
463         return ret;
464 }
465
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count and lock depth.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	/*
	 * Three flag characters: irqs off ('d', or 'X' when the arch
	 * cannot report it), need-resched ('N'), and irq context
	 * ('H' = both, 'h' = hardirq, 's' = softirq, '.' = none).
	 */
	if (!trace_seq_printf(s, "%c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	if (!ret)
		return 0;

	/* a negative lock_depth prints as '.' (no depth to report) */
	if (entry->lock_depth < 0)
		return trace_seq_putc(s, '.');

	return trace_seq_printf(s, "%d", entry->lock_depth);
}
505
506 static int
507 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
508 {
509         char comm[TASK_COMM_LEN];
510
511         trace_find_cmdline(entry->pid, comm);
512
513         if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
514                               comm, entry->pid, cpu))
515                 return 0;
516
517         return trace_print_lat_fmt(s, entry);
518 }
519
/* relative delay (usecs) above which an event is flagged with '!' */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print the absolute timestamp plus a one-character delay marker:
 * '!' above the threshold, '+' for more than 1us, ' ' otherwise.
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
530
/* Print the standard per-event context: "comm-pid [cpu] secs.usecs: ". */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	/* do_div() leaves the quotient (seconds) in t */
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}
545
/*
 * Print the latency-format context for the current entry.  In verbose
 * mode every field is spelled out; otherwise the compact generic
 * format plus a timestamp is used.  The relative time is the gap to
 * the next entry (0 when this is the last one).
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
584
/* one-letter encodings for the task states (R, S, D, ...) */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * Map a task @state bitmask to its single-character representation;
 * states beyond the table print as '?'.
 */
static int task_state_char(unsigned long state)
{
	/* 0 means running; otherwise index by lowest set state bit + 1 */
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
593
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of two, so this masks to a bucket */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
616
/* all dynamically registered events, kept sorted by type number */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free dynamic event type number and the list position to
 * insert the new event at, keeping the list sorted by type.  Returns
 * the type to use, or 0 when the dynamic range is exhausted.
 * Called with trace_event_mutex held for write.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	/*
	 * Insert before the entry that broke the sequence.  When the
	 * loop ran to completion, &e->list aliases the list head, which
	 * appends at the tail -- NOTE(review): subtle but intentional.
	 */
	*list = &e->list;
	return last + 1;
}
646
/* Readers of the event hash/list take these around their lookups. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
656
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {
			/* simple counter exhausted: scan for a freed slot */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {
			/* fast path: hand out the next unused number */
			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* fill in no-op callbacks for any output mode not provided */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
734
/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	/* remove from both the hash and the sorted type list */
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}
744
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Takes trace_event_mutex for write around the removal.  Returns 0.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
758
/*
 * Standard events
 */

/* Default callback for output modes an event does not implement. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
767
/* TRACE_FN */

/* Human-readable output: "ip <-parent_ip", resolved to symbols. */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
795
796 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
797 {
798         struct ftrace_entry *field;
799
800         trace_assign_type(field, iter->ent);
801
802         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
803                               field->ip,
804                               field->parent_ip))
805                 return TRACE_TYPE_PARTIAL_LINE;
806
807         return TRACE_TYPE_HANDLED;
808 }
809
/* TRACE_FN in hex format: ip and parent_ip as raw hex bytes. */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* each macro bails out with TRACE_TYPE_PARTIAL_LINE on overflow */
	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
822
/* TRACE_FN in binary format: the two addresses copied verbatim. */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* each macro bails out with TRACE_TYPE_PARTIAL_LINE on overflow */
	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
835
/* output callbacks for TRACE_FN entries */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
843
/* TRACE_CTX an TRACE_WAKE */

/*
 * Shared pretty-printer for sched switch/wakeup entries; @delim is
 * "==>" for a context switch and "  +" for a wakeup.
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
871
/* Context-switch and wakeup variants of the shared pretty-printer. */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}
882
/*
 * Raw format for switch/wakeup entries.  @S, when non-zero, overrides
 * the previous-task state character (wakeups pass '+').
 */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
905
/* Context-switch and wakeup variants of the raw printer. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}
915
916
/*
 * Hex format for switch/wakeup entries.  @S, when non-zero, overrides
 * the previous-task state character (wakeups pass '+').
 */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	/* each macro bails out with TRACE_TYPE_PARTIAL_LINE on overflow */
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}
939
/* Context-switch and wakeup variants of the hex printer. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
949
/*
 * Binary format shared by TRACE_CTX and TRACE_WAKE: raw field values,
 * no state-character translation (note next_cpu is not emitted).
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}
967
/* output callbacks for TRACE_CTX (sched switch) entries */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* output callbacks for TRACE_WAKE (sched wakeup) entries */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
983
/* TRACE_SPECIAL */

/* Print the three opaque arguments of a special entry as "# a b c". */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
1000
/* TRACE_SPECIAL in hex format: the three arguments as hex bytes. */
static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* each macro bails out with TRACE_TYPE_PARTIAL_LINE on overflow */
	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
1015
/* Binary output for a special entry: raw args, no formatting. */
static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* the *_RET helpers return from this function early on a full
	 * seq buffer (see trace_output.h) */
	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
1030
/* Output callbacks for TRACE_SPECIAL entries; raw reuses the text form. */
static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
1038
1039 /* TRACE_STACK */
1040
1041 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1042                                            int flags)
1043 {
1044         struct stack_entry *field;
1045         struct trace_seq *s = &iter->seq;
1046         int i;
1047
1048         trace_assign_type(field, iter->ent);
1049
1050         if (!trace_seq_puts(s, "<stack trace>\n"))
1051                 goto partial;
1052         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1053                 if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
1054                         break;
1055                 if (!trace_seq_puts(s, " => "))
1056                         goto partial;
1057
1058                 if (!seq_print_ip_sym(s, field->caller[i], flags))
1059                         goto partial;
1060                 if (!trace_seq_puts(s, "\n"))
1061                         goto partial;
1062         }
1063
1064         return TRACE_TYPE_HANDLED;
1065
1066  partial:
1067         return TRACE_TYPE_PARTIAL_LINE;
1068 }
1069
/* Output callbacks for TRACE_STACK entries; non-text forms reuse the
 * TRACE_SPECIAL handlers. */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
1077
1078 /* TRACE_USER_STACK */
1079 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1080                                                 int flags)
1081 {
1082         struct userstack_entry *field;
1083         struct trace_seq *s = &iter->seq;
1084
1085         trace_assign_type(field, iter->ent);
1086
1087         if (!trace_seq_puts(s, "<user stack trace>\n"))
1088                 goto partial;
1089
1090         if (!seq_print_userip_objs(field, s, flags))
1091                 goto partial;
1092
1093         return TRACE_TYPE_HANDLED;
1094
1095  partial:
1096         return TRACE_TYPE_PARTIAL_LINE;
1097 }
1098
/* Output callbacks for TRACE_USER_STACK entries; non-text forms reuse
 * the TRACE_SPECIAL handlers. */
static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
1106
1107 /* TRACE_BPRINT */
1108 static enum print_line_t
1109 trace_bprint_print(struct trace_iterator *iter, int flags)
1110 {
1111         struct trace_entry *entry = iter->ent;
1112         struct trace_seq *s = &iter->seq;
1113         struct bprint_entry *field;
1114
1115         trace_assign_type(field, entry);
1116
1117         if (!seq_print_ip_sym(s, field->ip, flags))
1118                 goto partial;
1119
1120         if (!trace_seq_puts(s, ": "))
1121                 goto partial;
1122
1123         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1124                 goto partial;
1125
1126         return TRACE_TYPE_HANDLED;
1127
1128  partial:
1129         return TRACE_TYPE_PARTIAL_LINE;
1130 }
1131
1132
1133 static enum print_line_t
1134 trace_bprint_raw(struct trace_iterator *iter, int flags)
1135 {
1136         struct bprint_entry *field;
1137         struct trace_seq *s = &iter->seq;
1138
1139         trace_assign_type(field, iter->ent);
1140
1141         if (!trace_seq_printf(s, ": %lx : ", field->ip))
1142                 goto partial;
1143
1144         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1145                 goto partial;
1146
1147         return TRACE_TYPE_HANDLED;
1148
1149  partial:
1150         return TRACE_TYPE_PARTIAL_LINE;
1151 }
1152
1153
/* Output callbacks for TRACE_BPRINT entries (no hex/binary handlers). */
static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};
1159
1160 /* TRACE_PRINT */
1161 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1162                                            int flags)
1163 {
1164         struct print_entry *field;
1165         struct trace_seq *s = &iter->seq;
1166
1167         trace_assign_type(field, iter->ent);
1168
1169         if (!seq_print_ip_sym(s, field->ip, flags))
1170                 goto partial;
1171
1172         if (!trace_seq_printf(s, ": %s", field->buf))
1173                 goto partial;
1174
1175         return TRACE_TYPE_HANDLED;
1176
1177  partial:
1178         return TRACE_TYPE_PARTIAL_LINE;
1179 }
1180
1181 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1182 {
1183         struct print_entry *field;
1184
1185         trace_assign_type(field, iter->ent);
1186
1187         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1188                 goto partial;
1189
1190         return TRACE_TYPE_HANDLED;
1191
1192  partial:
1193         return TRACE_TYPE_PARTIAL_LINE;
1194 }
1195
/* Output callbacks for TRACE_PRINT entries (no hex/binary handlers). */
static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};
1201
1202
/* NULL-terminated table of built-in events registered at boot by
 * init_events(); __initdata since it is never needed afterwards. */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};
1214
1215 __init static int init_events(void)
1216 {
1217         struct trace_event *event;
1218         int i, ret;
1219
1220         for (i = 0; events[i]; i++) {
1221                 event = events[i];
1222
1223                 ret = register_ftrace_event(event);
1224                 if (!ret) {
1225                         printk(KERN_WARNING "event %d failed to register\n",
1226                                event->type);
1227                         WARN_ON_ONCE(1);
1228                 }
1229         }
1230
1231         return 0;
1232 }
1233 device_initcall(init_events);