tracing: add __print_symbolic to trace events
kernel/trace/trace_output.c
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE  128

static DECLARE_RWSEM(trace_event_mutex);

DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

void trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_init(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct bprint_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_bprintf(s, field->fmt, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_printf(s, "%s", field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
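
/*
 * Illustrative sketch (not part of this file): an event's output callback
 * typically builds its line with trace_seq_printf() and reports a partial
 * line when the page-sized buffer fills up.  The entry type and field
 * names below are hypothetical.
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags)
 *	{
 *		struct my_entry *field;
 *
 *		trace_assign_type(field, iter->ent);
 *		if (!trace_seq_printf(&iter->seq, "my_event: count=%d\n",
 *				      field->count))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */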

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
        int len = (PAGE_SIZE - 1) - s->len;
        int ret;

        if (!len)
                return 0;

        ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        const unsigned char *data = mem;
        int i, j;

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                hex[j++] = hex_asc_hi(data[i]);
                hex[j++] = hex_asc_lo(data[i]);
        }
        hex[j++] = ' ';

        return trace_seq_putmem(s, hex, j);
}
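
/*
 * For reference: the bytes are emitted most-significant first, e.g. on a
 * little-endian machine the loop above walks a u32 holding 0x12345678 from
 * the highest byte index down and stores the string "12345678 " (note the
 * trailing space) into the trace_seq.
 */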

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
        void *ret;

        if (len > ((PAGE_SIZE - 1) - s->len))
                return NULL;

        ret = s->buffer + s->len;
        s->len += len;

        return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
        unsigned char *p;

        if (s->len >= (PAGE_SIZE - 1))
                return 0;
        p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
        if (!IS_ERR(p)) {
                p = mangle_path(s->buffer + s->len, p, "\n");
                if (p) {
                        s->len = p - s->buffer;
                        return 1;
                }
        } else {
                s->buffer[s->len++] = '?';
                return 1;
        }

        return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                       unsigned long flags,
                       const struct trace_print_flags *flag_array)
{
        unsigned long mask;
        const char *str;
        int i;

        trace_seq_init(p);

        for (i = 0;  flag_array[i].name && flags; i++) {

                mask = flag_array[i].mask;
                if ((flags & mask) != mask)
                        continue;

                str = flag_array[i].name;
                flags &= ~mask;
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_puts(p, str);
        }

        /* check for left over flags */
        if (flags) {
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_printf(p, "0x%lx", flags);
        }

        trace_seq_putc(p, 0);

        return p->buffer;
}
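
/*
 * A minimal sketch of how this is reached from an event definition: the
 * __print_flags() helper in TP_printk() expands into a call to
 * ftrace_print_flags_seq().  The flag values and names below are
 * hypothetical.
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ 0x1, "READ" },
 *				{ 0x2, "WRITE" },
 *				{ 0x4, "SYNC" }))
 */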

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                         const struct trace_print_flags *symbol_array)
{
        int i;

        trace_seq_init(p);

        for (i = 0;  symbol_array[i].name; i++) {

                if (val != symbol_array[i].mask)
                        continue;

                trace_seq_puts(p, symbol_array[i].name);
                break;
        }

        if (!p->len)
                trace_seq_printf(p, "0x%lx", val);

        trace_seq_putc(p, 0);

        return p->buffer;
}
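
/*
 * This helper backs the __print_symbolic() macro that this patch adds for
 * TRACE_EVENT() users: __print_symbolic() in a TP_printk() expands into a
 * call to ftrace_print_symbols_seq(), typically with the per-cpu
 * ftrace_event_seq declared above as @p.  A sketch with hypothetical
 * values and names:
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" },
 *				   { 1, "RUNNING" },
 *				   { 2, "BLOCKED" }))
 */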

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";
        int size = sizeof(tramp_name);

        if (strncmp(tramp_name, name, size) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}
#else
static inline const char *kretprobed(const char *name)
{
        return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        sprint_symbol(str, address);
        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
                      unsigned long ip, unsigned long sym_flags)
{
        struct file *file = NULL;
        unsigned long vmstart = 0;
        int ret = 1;

        if (mm) {
                const struct vm_area_struct *vma;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ip);
                if (vma) {
                        file = vma->vm_file;
                        vmstart = vma->vm_start;
                }
                if (file) {
                        ret = trace_seq_path(s, &file->f_path);
                        if (ret)
                                ret = trace_seq_printf(s, "[+0x%lx]",
                                                       ip - vmstart);
                }
                up_read(&mm->mmap_sem);
        }
        if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
                      unsigned long sym_flags)
{
        struct mm_struct *mm = NULL;
        int ret = 1;
        unsigned int i;

        if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
                struct task_struct *task;
                /*
                 * we do the lookup on the thread group leader,
                 * since individual threads might have already quit!
                 */
                rcu_read_lock();
                task = find_task_by_vpid(entry->ent.tgid);
                if (task)
                        mm = get_task_mm(task);
                rcu_read_unlock();
        }

        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                unsigned long ip = entry->caller[i];

                if (ip == ULONG_MAX || !ret)
                        break;
                if (i && ret)
                        ret = trace_seq_puts(s, " <- ");
                if (!ip) {
                        if (ret)
                                ret = trace_seq_puts(s, "??");
                        continue;
                }
                if (!ret)
                        break;
                if (ret)
                        ret = seq_print_user_ip(s, mm, ip, sym_flags);
        }

        if (mm)
                mmput(mm);
        return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

        if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
                              comm, entry->pid, cpu,
                              (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                                (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
                                  'X' : '.',
                              (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
                                'N' : '.',
                              (hardirq && softirq) ? 'H' :
                                hardirq ? 'h' : softirq ? 's' : '.'))
                return 0;

        if (entry->preempt_count)
                return trace_seq_printf(s, "%x", entry->preempt_count);
        return trace_seq_puts(s, ".");
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
                    unsigned long rel_usecs)
{
        return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
                                rel_usecs > preempt_mark_thresh ? '!' :
                                  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned long secs = (unsigned long)t;
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);

        return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
                                comm, entry->pid, iter->cpu, secs, usec_rem);
}
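
/*
 * With the format above, a line in the default output mode starts with
 * something like (the values here are made up):
 *
 *	          bash-2269  [003]  1174.741234: ...
 */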

int trace_print_lat_context(struct trace_iterator *iter)
{
        u64 next_ts;
        int ret;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent,
                           *next_entry = trace_find_next_entry(iter, NULL,
                                                               &next_ts);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
        unsigned long rel_usecs;

        if (!next_entry)
                next_ts = iter->ts;
        rel_usecs = ns2usecs(next_ts - iter->ts);

        if (verbose) {
                char comm[TASK_COMM_LEN];

                trace_find_cmdline(entry->pid, comm);

                ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
                                       " %ld.%03ldms (+%ld.%03ldms): ", comm,
                                       entry->pid, iter->cpu, entry->flags,
                                       entry->preempt_count, iter->idx,
                                       ns2usecs(iter->ts),
                                       abs_usecs / USEC_PER_MSEC,
                                       abs_usecs % USEC_PER_MSEC,
                                       rel_usecs / USEC_PER_MSEC,
                                       rel_usecs % USEC_PER_MSEC);
        } else {
                ret = lat_print_generic(s, entry, iter->cpu);
                if (ret)
                        ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
        }

        return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
        int bit = state ? __ffs(state) + 1 : 0;

        return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
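
/*
 * Example: with TASK_STATE_TO_CHAR_STR beginning "RSDT...", a state of 0
 * (running) maps to 'R', TASK_INTERRUPTIBLE (0x1) maps to 'S' and
 * TASK_UNINTERRUPTIBLE (0x2) maps to 'D'; bits beyond the string fall back
 * to '?'.
 */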

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
        struct trace_event *event;
        struct hlist_node *n;
        unsigned key;

        key = type & (EVENT_HASHSIZE - 1);

        hlist_for_each_entry(event, n, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }

        return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
        struct trace_event *e;
        int last = __TRACE_LAST_TYPE;

        if (list_empty(&ftrace_event_list)) {
                *list = &ftrace_event_list;
                return last + 1;
        }

        /*
         * We used up all possible max events,
         * let's see if somebody freed one.
         */
        list_for_each_entry(e, &ftrace_event_list, list) {
                if (e->type != last + 1)
                        break;
                last++;
        }

        /* Did we use up all 65 thousand events??? */
        if ((last + 1) > FTRACE_MAX_EVENT)
                return 0;

        *list = &e->list;
        return last + 1;
}

void trace_event_read_lock(void)
{
        down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
        up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
        unsigned key;
        int ret = 0;

        down_write(&trace_event_mutex);

        if (WARN_ON(!event))
                goto out;

        INIT_LIST_HEAD(&event->list);

        if (!event->type) {
                struct list_head *list = NULL;

                if (next_event_type > FTRACE_MAX_EVENT) {

                        event->type = trace_search_list(&list);
                        if (!event->type)
                                goto out;

                } else {

                        event->type = next_event_type++;
                        list = &ftrace_event_list;
                }

                if (WARN_ON(ftrace_find_event(event->type)))
                        goto out;

                list_add_tail(&event->list, list);

        } else if (event->type > __TRACE_LAST_TYPE) {
                printk(KERN_WARNING "Need to add type to trace.h\n");
                WARN_ON(1);
                goto out;
        } else {
                /* Is this event already used */
                if (ftrace_find_event(event->type))
                        goto out;
        }

        if (event->trace == NULL)
                event->trace = trace_nop_print;
        if (event->raw == NULL)
                event->raw = trace_nop_print;
        if (event->hex == NULL)
                event->hex = trace_nop_print;
        if (event->binary == NULL)
                event->binary = trace_nop_print;

        key = event->type & (EVENT_HASHSIZE - 1);

        hlist_add_head(&event->node, &event_hash[key]);

        ret = event->type;
 out:
        up_write(&trace_event_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
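
/*
 * A minimal usage sketch (hypothetical event, not part of this file): a
 * tracer fills in a struct trace_event and registers it, leaving .type
 * zero so the core assigns a dynamic type; callbacks it does not provide
 * default to trace_nop_print.
 *
 *	static struct trace_event my_event = {
 *		.trace	= my_event_trace,
 *	};
 *
 *	static int __init my_event_init(void)
 *	{
 *		if (!register_ftrace_event(&my_event))
 *			return -ENODEV;
 *		return 0;
 *	}
 */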

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
        down_write(&trace_event_mutex);
        hlist_del(&event->node);
        list_del(&event->list);
        up_write(&trace_event_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
        return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
                if (!trace_seq_printf(s, " <-"))
                        goto partial;
                if (!seq_print_ip_sym(s,
                                      field->parent_ip,
                                      flags))
                        goto partial;
        }
        if (!trace_seq_printf(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
                              field->ip,
                              field->parent_ip))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->ip);
        SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->ip);
        SEQ_PUT_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
        .type           = TRACE_FN,
        .trace          = trace_fn_trace,
        .raw            = trace_fn_raw,
        .hex            = trace_fn_hex,
        .binary         = trace_fn_bin,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
{
        struct ctx_switch_entry *field;
        char comm[TASK_COMM_LEN];
        int S, T;


        trace_assign_type(field, iter->ent);

        T = task_state_char(field->next_state);
        S = task_state_char(field->prev_state);
        trace_find_cmdline(field->next_pid, comm);
        if (!trace_seq_printf(&iter->seq,
                              " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                              field->prev_pid,
                              field->prev_prio,
                              S, delim,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T, comm))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
                                          int flags)
{
        return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);
        if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                              field->prev_pid,
                              field->prev_prio,
                              S,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, '+');
}


static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);

        SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_HEX_FIELD_RET(s, S);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
        SEQ_PUT_HEX_FIELD_RET(s, T);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_FIELD_RET(s, field->prev_state);
        SEQ_PUT_FIELD_RET(s, field->next_pid);
        SEQ_PUT_FIELD_RET(s, field->next_prio);
        SEQ_PUT_FIELD_RET(s, field->next_state);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
        .type           = TRACE_CTX,
        .trace          = trace_ctx_print,
        .raw            = trace_ctx_raw,
        .hex            = trace_ctx_hex,
        .binary         = trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
        .type           = TRACE_WAKE,
        .trace          = trace_wake_print,
        .raw            = trace_wake_raw,
        .hex            = trace_wake_hex,
        .binary         = trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
                                             int flags)
{
        struct special_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
                              field->arg1,
                              field->arg2,
                              field->arg3))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->arg1);
        SEQ_PUT_FIELD_RET(s, field->arg2);
        SEQ_PUT_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
        .type           = TRACE_SPECIAL,
        .trace          = trace_special_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
                                           int flags)
{
        struct stack_entry *field;
        struct trace_seq *s = &iter->seq;
        int i;

        trace_assign_type(field, iter->ent);

        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                if (!field->caller[i])
                        break;
                if (i) {
                        if (!trace_seq_puts(s, " <= "))
                                goto partial;

                        if (!seq_print_ip_sym(s, field->caller[i], flags))
                                goto partial;
                }
                if (!trace_seq_puts(s, "\n"))
                        goto partial;
        }

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
        .type           = TRACE_STACK,
        .trace          = trace_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
                                                int flags)
{
        struct userstack_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_userip_objs(field, s, flags))
                goto partial;

        if (!trace_seq_putc(s, '\n'))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct bprint_entry *field;

        trace_assign_type(field, entry);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_puts(s, ": "))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
        struct bprint_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(s, ": %lx : ", field->ip))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}


static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
                                           int flags)
{
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_printf(s, ": %s", field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
        struct print_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};


static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_special_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};

__init static int init_events(void)
{
        struct trace_event *event;
        int i, ret;

        for (i = 0; events[i]; i++) {
                event = events[i];

                ret = register_ftrace_event(event);
                if (!ret) {
                        printk(KERN_WARNING "event %d failed to register\n",
                               event->type);
                        WARN_ON_ONCE(1);
                }
        }

        return 0;
}
device_initcall(init_events);