tracing: add __print_flags for events
[safe/jmp/linux-2.6] / kernel / trace / trace_output.c
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
/* must be a power of 2 */
#define EVENT_HASHSIZE  128

/* protects event_hash and the dynamic event-type bookkeeping below */
static DECLARE_RWSEM(trace_event_mutex);

/* per-cpu scratch buffer used by event output helpers */
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);

/* registered trace_events, hashed by event type */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* next dynamic event type id to hand out (above the static trace types) */
static int next_event_type = __TRACE_LAST_TYPE + 1;
24
25 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
26 {
27         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
28
29         s->buffer[len] = 0;
30         seq_puts(m, s->buffer);
31
32         trace_seq_init(s);
33 }
34
35 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
36 {
37         struct trace_seq *s = &iter->seq;
38         struct trace_entry *entry = iter->ent;
39         struct bprint_entry *field;
40         int ret;
41
42         trace_assign_type(field, entry);
43
44         ret = trace_seq_bprintf(s, field->fmt, field->buf);
45         if (!ret)
46                 return TRACE_TYPE_PARTIAL_LINE;
47
48         return TRACE_TYPE_HANDLED;
49 }
50
51 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
52 {
53         struct trace_seq *s = &iter->seq;
54         struct trace_entry *entry = iter->ent;
55         struct print_entry *field;
56         int ret;
57
58         trace_assign_type(field, entry);
59
60         ret = trace_seq_printf(s, "%s", field->buf);
61         if (!ret)
62                 return TRACE_TYPE_PARTIAL_LINE;
63
64         return TRACE_TYPE_HANDLED;
65 }
66
67 /**
68  * trace_seq_printf - sequence printing of trace information
69  * @s: trace sequence descriptor
70  * @fmt: printf format string
71  *
72  * The tracer may use either sequence operations or its own
73  * copy to user routines. To simplify formating of a trace
74  * trace_seq_printf is used to store strings into a special
75  * buffer (@s). Then the output may be either used by
76  * the sequencer or pulled into another buffer.
77  */
78 int
79 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
80 {
81         int len = (PAGE_SIZE - 1) - s->len;
82         va_list ap;
83         int ret;
84
85         if (!len)
86                 return 0;
87
88         va_start(ap, fmt);
89         ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
90         va_end(ap);
91
92         /* If we can't write it all, don't bother writing anything */
93         if (ret >= len)
94                 return 0;
95
96         s->len += ret;
97
98         return len;
99 }
100 EXPORT_SYMBOL_GPL(trace_seq_printf);
101
102 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
103 {
104         int len = (PAGE_SIZE - 1) - s->len;
105         int ret;
106
107         if (!len)
108                 return 0;
109
110         ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
111
112         /* If we can't write it all, don't bother writing anything */
113         if (ret >= len)
114                 return 0;
115
116         s->len += ret;
117
118         return len;
119 }
120
121 /**
122  * trace_seq_puts - trace sequence printing of simple string
123  * @s: trace sequence descriptor
124  * @str: simple string to record
125  *
126  * The tracer may use either the sequence operations or its own
127  * copy to user routines. This function records a simple string
128  * into a special buffer (@s) for later retrieval by a sequencer
129  * or other mechanism.
130  */
131 int trace_seq_puts(struct trace_seq *s, const char *str)
132 {
133         int len = strlen(str);
134
135         if (len > ((PAGE_SIZE - 1) - s->len))
136                 return 0;
137
138         memcpy(s->buffer + s->len, str, len);
139         s->len += len;
140
141         return len;
142 }
143
144 int trace_seq_putc(struct trace_seq *s, unsigned char c)
145 {
146         if (s->len >= (PAGE_SIZE - 1))
147                 return 0;
148
149         s->buffer[s->len++] = c;
150
151         return 1;
152 }
153
154 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
155 {
156         if (len > ((PAGE_SIZE - 1) - s->len))
157                 return 0;
158
159         memcpy(s->buffer + s->len, mem, len);
160         s->len += len;
161
162         return len;
163 }
164
/*
 * Emit @len bytes of @mem as hex digits followed by a single space.
 * The bytes are walked in reverse on little endian so a multi-byte
 * value reads most-significant-digit first either way.
 *
 * NOTE(review): assumes 2 * len + 1 <= HEX_CHARS; callers appear to
 * pass fixed-size fields -- confirm before using with larger buffers.
 */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	/* memory order is already most-significant first */
	for (i = 0, j = 0; i < len; i++) {
#else
	/* walk last byte to first so the value prints big-endian */
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
183
184 void *trace_seq_reserve(struct trace_seq *s, size_t len)
185 {
186         void *ret;
187
188         if (len > ((PAGE_SIZE - 1) - s->len))
189                 return NULL;
190
191         ret = s->buffer + s->len;
192         s->len += len;
193
194         return ret;
195 }
196
/*
 * Append the pathname of @path to the sequence buffer.
 * Returns 1 on success (a '?' is emitted when the path cannot be
 * resolved), 0 when nothing could be written.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path() builds the name from the end of the supplied buffer */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		/*
		 * mangle_path() moves the string to the start of the
		 * region; presumably it also escapes '\n' -- verify
		 * against the seq_file implementation.
		 */
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		/* could not resolve the path; emit a placeholder */
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
217
218 const char *
219 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
220                        unsigned long flags,
221                        const struct trace_print_flags *flag_array)
222 {
223         unsigned long mask;
224         const char *str;
225         int i;
226
227         trace_seq_init(p);
228
229         for (i = 0;  flag_array[i].name && flags; i++) {
230
231                 mask = flag_array[i].mask;
232                 if ((flags & mask) != mask)
233                         continue;
234
235                 str = flag_array[i].name;
236                 flags &= ~mask;
237                 if (p->len && delim)
238                         trace_seq_puts(p, delim);
239                 trace_seq_puts(p, str);
240         }
241
242         /* check for left over flags */
243         if (flags) {
244                 if (p->len && delim)
245                         trace_seq_puts(p, delim);
246                 trace_seq_printf(p, "0x%lx", flags);
247         }
248
249         trace_seq_putc(p, 0);
250
251         return p->buffer;
252 }
253
#ifdef CONFIG_KRETPROBES
/*
 * A kretprobed function's return address is replaced with the
 * trampoline, so a symbol lookup would report "kretprobe_trampoline";
 * substitute an explicit marker instead.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	/* size includes the NUL, so this is a whole-string compare */
	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

/*
 * Print just the symbol name for @address via @fmt.  Without
 * CONFIG_KALLSYMS nothing is printed and success (1) is returned.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

/*
 * Like seq_print_sym_short() but sprint_symbol() includes the
 * offset/size information in the symbol string.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

/* width of a printed instruction pointer, matching sizeof(long) */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
308
/*
 * Print a userspace address, resolved through the task's memory map
 * to "path[+offset]" when possible, and/or as a raw "<address>".
 * Returns 0 when the buffer filled up mid-print.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		/* vma and its vm_file are only stable under mmap_sem */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			/* backing file path plus the offset into the mapping */
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	/* numeric address: always when unmapped, or when explicitly asked */
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
337
/*
 * Print every address of a recorded user stack trace, separated by
 * " <- ".  When TRACE_ITER_SYM_USEROBJ is set the owning task's mm is
 * looked up so addresses can be resolved to mapped files.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX marks the end of the recorded stack */
		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			/* a zero slot stands for an unresolved frame */
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}
381
382 int
383 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
384 {
385         int ret;
386
387         if (!ip)
388                 return trace_seq_printf(s, "0");
389
390         if (sym_flags & TRACE_ITER_SYM_OFFSET)
391                 ret = seq_print_sym_offset(s, "%s", ip);
392         else
393                 ret = seq_print_sym_short(s, "%s", ip);
394
395         if (!ret)
396                 return 0;
397
398         if (sym_flags & TRACE_ITER_SYM_ADDR)
399                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
400         return ret;
401 }
402
/*
 * Print the latency-format prefix for one entry: "comm-pid cpu"
 * followed by one character each for irqs-off ('d', 'X' when the arch
 * can't tell, '.'), need-resched ('N'/'.') and interrupt context
 * ('H' both, 'h' hardirq, 's' softirq, '.').
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
			      comm, entry->pid, cpu,
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	/* '.' stands for a preempt count of zero */
	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}

/* usec delta above which the timestamp is flagged with '!' */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print the absolute timestamp and a marker for the gap to the next
 * event: '!' above preempt_mark_thresh usecs, '+' above 1, else ' '.
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
439
440 int trace_print_context(struct trace_iterator *iter)
441 {
442         struct trace_seq *s = &iter->seq;
443         struct trace_entry *entry = iter->ent;
444         unsigned long long t = ns2usecs(iter->ts);
445         unsigned long usec_rem = do_div(t, USEC_PER_SEC);
446         unsigned long secs = (unsigned long)t;
447         char comm[TASK_COMM_LEN];
448
449         trace_find_cmdline(entry->pid, comm);
450
451         return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
452                                 comm, entry->pid, iter->cpu, secs, usec_rem);
453 }
454
/*
 * Print the latency-format context for the current entry.  Verbose
 * mode prints everything numerically; otherwise the compact
 * lat_print_generic()/lat_print_timestamp() form is used.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* with no following entry the relative delta is zero */
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
493
/* one character per task state bit, 'R' (running) first */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	/* bit 0 of the string is running; __ffs()+1 maps state bits */
	int bit = state ? __ffs(state) + 1 : 0;

	/* the -1 skips the terminating NUL; unknown states print '?' */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
502
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so the mask picks a bucket */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
525
/* dynamically-typed events, kept sorted by type so holes can be found */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free dynamic event type id once the simple counter has been
 * exhausted, by scanning the sorted list for a hole left by an
 * unregistered event.  On success *list is set to the node to insert
 * after and the free id is returned; 0 means every id is in use.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
555
/* hold the event hash/list stable while iterating output handlers */
void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
565
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			/* counter exhausted: try to reuse a freed id */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		/* a collision here would mean corrupted bookkeeping */
		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* fill in any output callbacks the caller left unset */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
643
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * NOTE(review): only the rwsem serializes against lookups; the caller
 * presumably must ensure no output path can still be using @event
 * once this returns -- confirm with callers.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	hlist_del(&event->node);
	list_del(&event->list);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
658
/*
 * Standard events
 */

/* default output callback: consume the entry without printing */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
667
668 /* TRACE_FN */
669 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
670 {
671         struct ftrace_entry *field;
672         struct trace_seq *s = &iter->seq;
673
674         trace_assign_type(field, iter->ent);
675
676         if (!seq_print_ip_sym(s, field->ip, flags))
677                 goto partial;
678
679         if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
680                 if (!trace_seq_printf(s, " <-"))
681                         goto partial;
682                 if (!seq_print_ip_sym(s,
683                                       field->parent_ip,
684                                       flags))
685                         goto partial;
686         }
687         if (!trace_seq_printf(s, "\n"))
688                 goto partial;
689
690         return TRACE_TYPE_HANDLED;
691
692  partial:
693         return TRACE_TYPE_PARTIAL_LINE;
694 }
695
696 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
697 {
698         struct ftrace_entry *field;
699
700         trace_assign_type(field, iter->ent);
701
702         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
703                               field->ip,
704                               field->parent_ip))
705                 return TRACE_TYPE_PARTIAL_LINE;
706
707         return TRACE_TYPE_HANDLED;
708 }
709
/* hex output; SEQ_PUT_HEX_FIELD_RET returns early on a full buffer */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

/* raw binary output of the ip and parent_ip fields */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

/* output callbacks for TRACE_FN entries */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
743
744 /* TRACE_CTX an TRACE_WAKE */
745 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
746                                              char *delim)
747 {
748         struct ctx_switch_entry *field;
749         char comm[TASK_COMM_LEN];
750         int S, T;
751
752
753         trace_assign_type(field, iter->ent);
754
755         T = task_state_char(field->next_state);
756         S = task_state_char(field->prev_state);
757         trace_find_cmdline(field->next_pid, comm);
758         if (!trace_seq_printf(&iter->seq,
759                               " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
760                               field->prev_pid,
761                               field->prev_prio,
762                               S, delim,
763                               field->next_cpu,
764                               field->next_pid,
765                               field->next_prio,
766                               T, comm))
767                 return TRACE_TYPE_PARTIAL_LINE;
768
769         return TRACE_TYPE_HANDLED;
770 }
771
/* context switches separate prev and next with "==>" */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

/* wakeups separate waker and wakee with "  +" */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}
782
783 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
784 {
785         struct ctx_switch_entry *field;
786         int T;
787
788         trace_assign_type(field, iter->ent);
789
790         if (!S)
791                 task_state_char(field->prev_state);
792         T = task_state_char(field->next_state);
793         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
794                               field->prev_pid,
795                               field->prev_prio,
796                               S,
797                               field->next_cpu,
798                               field->next_pid,
799                               field->next_prio,
800                               T))
801                 return TRACE_TYPE_PARTIAL_LINE;
802
803         return TRACE_TYPE_HANDLED;
804 }
805
/* S == 0 asks trace_ctxwake_raw() to derive the prev-state character */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

/* wakeups always use '+' for the first state column */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}
815
816
817 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
818 {
819         struct ctx_switch_entry *field;
820         struct trace_seq *s = &iter->seq;
821         int T;
822
823         trace_assign_type(field, iter->ent);
824
825         if (!S)
826                 task_state_char(field->prev_state);
827         T = task_state_char(field->next_state);
828
829         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
830         SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
831         SEQ_PUT_HEX_FIELD_RET(s, S);
832         SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
833         SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
834         SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
835         SEQ_PUT_HEX_FIELD_RET(s, T);
836
837         return TRACE_TYPE_HANDLED;
838 }
839
/* S == 0 asks trace_ctxwake_hex() to derive the prev-state character */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

/* wakeups always use '+' for the first state column */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
849
/*
 * Binary output shared by TRACE_CTX and TRACE_WAKE entries.
 * NOTE(review): next_cpu is not emitted here, unlike the raw and hex
 * forms -- confirm whether consumers expect that.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

/* output callbacks for TRACE_CTX entries */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* output callbacks for TRACE_WAKE entries */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
883
884 /* TRACE_SPECIAL */
885 static enum print_line_t trace_special_print(struct trace_iterator *iter,
886                                              int flags)
887 {
888         struct special_entry *field;
889
890         trace_assign_type(field, iter->ent);
891
892         if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
893                               field->arg1,
894                               field->arg2,
895                               field->arg3))
896                 return TRACE_TYPE_PARTIAL_LINE;
897
898         return TRACE_TYPE_HANDLED;
899 }
900
/* hex dump of the three arguments */
static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

/* raw binary dump of the three arguments */
static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

/* output callbacks for TRACE_SPECIAL (raw reuses the text form) */
static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
938
939 /* TRACE_STACK */
940
941 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
942                                            int flags)
943 {
944         struct stack_entry *field;
945         struct trace_seq *s = &iter->seq;
946         int i;
947
948         trace_assign_type(field, iter->ent);
949
950         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
951                 if (!field->caller[i])
952                         break;
953                 if (i) {
954                         if (!trace_seq_puts(s, " <= "))
955                                 goto partial;
956
957                         if (!seq_print_ip_sym(s, field->caller[i], flags))
958                                 goto partial;
959                 }
960                 if (!trace_seq_puts(s, "\n"))
961                         goto partial;
962         }
963
964         return TRACE_TYPE_HANDLED;
965
966  partial:
967         return TRACE_TYPE_PARTIAL_LINE;
968 }
969
/* output callbacks for TRACE_STACK; non-text forms reuse TRACE_SPECIAL's */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
977
978 /* TRACE_USER_STACK */
979 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
980                                                 int flags)
981 {
982         struct userstack_entry *field;
983         struct trace_seq *s = &iter->seq;
984
985         trace_assign_type(field, iter->ent);
986
987         if (!seq_print_userip_objs(field, s, flags))
988                 goto partial;
989
990         if (!trace_seq_putc(s, '\n'))
991                 goto partial;
992
993         return TRACE_TYPE_HANDLED;
994
995  partial:
996         return TRACE_TYPE_PARTIAL_LINE;
997 }
998
/* output callbacks for TRACE_USER_STACK; non-text forms reuse TRACE_SPECIAL's */
static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
1006
1007 /* TRACE_BPRINT */
1008 static enum print_line_t
1009 trace_bprint_print(struct trace_iterator *iter, int flags)
1010 {
1011         struct trace_entry *entry = iter->ent;
1012         struct trace_seq *s = &iter->seq;
1013         struct bprint_entry *field;
1014
1015         trace_assign_type(field, entry);
1016
1017         if (!seq_print_ip_sym(s, field->ip, flags))
1018                 goto partial;
1019
1020         if (!trace_seq_puts(s, ": "))
1021                 goto partial;
1022
1023         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1024                 goto partial;
1025
1026         return TRACE_TYPE_HANDLED;
1027
1028  partial:
1029         return TRACE_TYPE_PARTIAL_LINE;
1030 }
1031
1032
1033 static enum print_line_t
1034 trace_bprint_raw(struct trace_iterator *iter, int flags)
1035 {
1036         struct bprint_entry *field;
1037         struct trace_seq *s = &iter->seq;
1038
1039         trace_assign_type(field, iter->ent);
1040
1041         if (!trace_seq_printf(s, ": %lx : ", field->ip))
1042                 goto partial;
1043
1044         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1045                 goto partial;
1046
1047         return TRACE_TYPE_HANDLED;
1048
1049  partial:
1050         return TRACE_TYPE_PARTIAL_LINE;
1051 }
1052
1053
/*
 * Output callbacks for TRACE_BPRINT; hex and binary are left NULL and
 * filled in with trace_nop_print by register_ftrace_event().
 */
static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};
1059
1060 /* TRACE_PRINT */
1061 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1062                                            int flags)
1063 {
1064         struct print_entry *field;
1065         struct trace_seq *s = &iter->seq;
1066
1067         trace_assign_type(field, iter->ent);
1068
1069         if (!seq_print_ip_sym(s, field->ip, flags))
1070                 goto partial;
1071
1072         if (!trace_seq_printf(s, ": %s", field->buf))
1073                 goto partial;
1074
1075         return TRACE_TYPE_HANDLED;
1076
1077  partial:
1078         return TRACE_TYPE_PARTIAL_LINE;
1079 }
1080
1081 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1082 {
1083         struct print_entry *field;
1084
1085         trace_assign_type(field, iter->ent);
1086
1087         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1088                 goto partial;
1089
1090         return TRACE_TYPE_HANDLED;
1091
1092  partial:
1093         return TRACE_TYPE_PARTIAL_LINE;
1094 }
1095
/*
 * Output callbacks for TRACE_PRINT; hex and binary are left NULL and
 * filled in with trace_nop_print by register_ftrace_event().
 */
static struct trace_event trace_print_event = {
	.type	 	= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};
1101
1102
/* built-in events registered at boot by init_events(); NULL terminated */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};
1114
1115 __init static int init_events(void)
1116 {
1117         struct trace_event *event;
1118         int i, ret;
1119
1120         for (i = 0; events[i]; i++) {
1121                 event = events[i];
1122
1123                 ret = register_ftrace_event(event);
1124                 if (!ret) {
1125                         printk(KERN_WARNING "event %d failed to register\n",
1126                                event->type);
1127                         WARN_ON_ONCE(1);
1128                 }
1129         }
1130
1131         return 0;
1132 }
1133 device_initcall(init_events);