4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
12 #include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE	128
/* Protects the event hash and registered-event list; readers take it shared. */
17 DECLARE_RWSEM(trace_event_mutex);
/* Per-CPU scratch trace_seq, exported for use by event output paths. */
19 DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
20 EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
/* Hash of registered trace_events, keyed by type & (EVENT_HASHSIZE - 1). */
22 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
/* Next dynamic event type id to hand out, above the static trace types. */
24 static int next_event_type = __TRACE_LAST_TYPE + 1;
/* Copy a trace_seq's buffer into a seq_file, capped at PAGE_SIZE - 1 bytes. */
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
28 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
30 seq_write(m, s->buffer, len);
/*
 * Emit only the binary-printk message of the current entry (no ip/symbol).
 * Returns TRACE_TYPE_PARTIAL_LINE when the seq buffer fills up.
 */
35 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
37 struct trace_seq *s = &iter->seq;
38 struct trace_entry *entry = iter->ent;
39 struct bprint_entry *field;
42 trace_assign_type(field, entry);
44 ret = trace_seq_bprintf(s, field->fmt, field->buf);
46 return TRACE_TYPE_PARTIAL_LINE;
48 return TRACE_TYPE_HANDLED;
/*
 * Emit only the printk-style message of the current entry (no ip/symbol).
 * Returns TRACE_TYPE_PARTIAL_LINE when the seq buffer fills up.
 */
51 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
53 struct trace_seq *s = &iter->seq;
54 struct trace_entry *entry = iter->ent;
55 struct print_entry *field;
58 trace_assign_type(field, entry);
60 ret = trace_seq_printf(s, "%s", field->buf);
62 return TRACE_TYPE_PARTIAL_LINE;
64 return TRACE_TYPE_HANDLED;
68 * trace_seq_printf - sequence printing of trace information
69 * @s: trace sequence descriptor
70 * @fmt: printf format string
72 * The tracer may use either sequence operations or its own
73 * copy to user routines. To simplify formatting of a trace
74 * trace_seq_printf is used to store strings into a special
75 * buffer (@s). Then the output may be either used by
76 * the sequencer or pulled into another buffer.
79 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
/* Space left in the page-sized buffer; one byte is held in reserve. */
81 int len = (PAGE_SIZE - 1) - s->len;
89 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
92 /* If we can't write it all, don't bother writing anything */
100 EXPORT_SYMBOL_GPL(trace_seq_printf);
103 * trace_seq_vprintf - sequence printing of trace information
104 * @s: trace sequence descriptor
105 * @fmt: printf format string
107 * The tracer may use either sequence operations or its own
108 * copy to user routines. To simplify formatting of a trace
109 * trace_seq_vprintf is used to store strings into a special
110 * buffer (@s). Then the output may be either used by
111 * the sequencer or pulled into another buffer.
114 trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
/* Space left in the page-sized buffer; one byte is held in reserve. */
116 int len = (PAGE_SIZE - 1) - s->len;
122 ret = vsnprintf(s->buffer + s->len, len, fmt, args);
124 /* If we can't write it all, don't bother writing anything */
132 EXPORT_SYMBOL_GPL(trace_seq_vprintf);
/* Append a bstr_printf()-expanded binary-printk record to @s. */
134 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
136 int len = (PAGE_SIZE - 1) - s->len;
142 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
144 /* If we can't write it all, don't bother writing anything */
154 * trace_seq_puts - trace sequence printing of simple string
155 * @s: trace sequence descriptor
156 * @str: simple string to record
158 * The tracer may use either the sequence operations or its own
159 * copy to user routines. This function records a simple string
160 * into a special buffer (@s) for later retrieval by a sequencer
161 * or other mechanism.
163 int trace_seq_puts(struct trace_seq *s, const char *str)
165 int len = strlen(str);
/* All-or-nothing: reject the write when the whole string does not fit. */
167 if (len > ((PAGE_SIZE - 1) - s->len))
170 memcpy(s->buffer + s->len, str, len);
/* Append a single character to @s, failing when the buffer is full. */
176 int trace_seq_putc(struct trace_seq *s, unsigned char c)
178 if (s->len >= (PAGE_SIZE - 1))
181 s->buffer[s->len++] = c;
/* Append @len raw bytes from @mem to @s; all-or-nothing like the others. */
186 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
188 if (len > ((PAGE_SIZE - 1) - s->len))
191 memcpy(s->buffer + s->len, mem, len);
/* Convert @len bytes at @mem into ASCII hex and append them to @s. */
197 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
199 unsigned char hex[HEX_CHARS];
200 const unsigned char *data = mem;
/*
 * NOTE(review): two loop headers are visible (ascending and descending
 * byte order) — presumably an endianness #ifdef selects between them in
 * the full source; confirm before relying on byte ordering here.
 */
204 for (i = 0, j = 0; i < len; i++) {
206 for (i = len-1, j = 0; i >= 0; i--) {
208 hex[j++] = hex_asc_hi(data[i]);
209 hex[j++] = hex_asc_lo(data[i]);
213 return trace_seq_putmem(s, hex, j);
/* Reserve @len bytes in @s and return a pointer to the reserved region. */
216 void *trace_seq_reserve(struct trace_seq *s, size_t len)
220 if (len > ((PAGE_SIZE - 1) - s->len))
223 ret = s->buffer + s->len;
/*
 * Append the d_path() of @path to @s; newlines in the result are mangled
 * so a trace line stays a single line.  On failure a '?' is emitted.
 */
229 int trace_seq_path(struct trace_seq *s, struct path *path)
233 if (s->len >= (PAGE_SIZE - 1))
235 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
237 p = mangle_path(s->buffer + s->len, p, "\n");
239 s->len = p - s->buffer;
243 s->buffer[s->len++] = '?';
/*
 * Render @flags as delimiter-separated flag names from @flag_array into @p,
 * printing any unmatched remainder in hex.  Returns the start of the
 * rendered, NUL-terminated string inside @p's buffer.
 */
251 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
253 const struct trace_print_flags *flag_array)
257 const char *ret = p->buffer + p->len;
260 for (i = 0; flag_array[i].name && flags; i++) {
262 mask = flag_array[i].mask;
/* Only report a name when every bit of its mask is set in @flags. */
263 if ((flags & mask) != mask)
266 str = flag_array[i].name;
269 trace_seq_puts(p, delim);
270 trace_seq_puts(p, str);
273 /* check for left over flags */
276 trace_seq_puts(p, delim);
277 trace_seq_printf(p, "0x%lx", flags);
/* NUL-terminate so callers can treat the result as a C string. */
280 trace_seq_putc(p, 0);
284 EXPORT_SYMBOL(ftrace_print_flags_seq);
/*
 * Map @val to its symbolic name from @symbol_array (exact match on .mask),
 * falling back to hex.  Returns the start of the rendered string in @p.
 */
287 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
288 const struct trace_print_flags *symbol_array)
291 const char *ret = p->buffer + p->len;
293 for (i = 0; symbol_array[i].name; i++) {
295 if (val != symbol_array[i].mask)
298 trace_seq_puts(p, symbol_array[i].name);
303 trace_seq_printf(p, "0x%lx", val);
305 trace_seq_putc(p, 0);
309 EXPORT_SYMBOL(ftrace_print_symbols_seq);
311 #ifdef CONFIG_KRETPROBES
/* Replace the kretprobe trampoline symbol with a readable placeholder. */
312 static inline const char *kretprobed(const char *name)
314 static const char tramp_name[] = "kretprobe_trampoline";
315 int size = sizeof(tramp_name);
317 if (strncmp(tramp_name, name, size) == 0)
318 return "[unknown/kretprobe'd]";
/* Without kretprobes, names pass through untouched. */
322 static inline const char *kretprobed(const char *name)
326 #endif /* CONFIG_KRETPROBES */
/* Print the bare symbol name for @address (no offset) into @s via @fmt. */
329 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
331 #ifdef CONFIG_KALLSYMS
332 char str[KSYM_SYMBOL_LEN];
335 kallsyms_lookup(address, NULL, NULL, NULL, str);
/* Hide the kretprobe trampoline behind a friendly name. */
337 name = kretprobed(str);
339 return trace_seq_printf(s, fmt, name);
/* Print symbol+offset for @address into @s via @fmt (CONFIG_KALLSYMS). */
345 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
346 unsigned long address)
348 #ifdef CONFIG_KALLSYMS
349 char str[KSYM_SYMBOL_LEN];
352 sprint_symbol(str, address);
353 name = kretprobed(str);
355 return trace_seq_printf(s, fmt, name);
/*
 * Raw instruction-pointer format width — presumably a 32- vs 64-bit
 * #ifdef (elided in this view) selects between the two definitions.
 */
361 # define IP_FMT "%08lx"
363 # define IP_FMT "%016lx"
/*
 * Print a userspace ip, resolving it to a mapped file plus offset via
 * @mm's VMAs when available; optionally append the raw address when
 * SYM_ADDR is requested or no file mapping was found.
 */
366 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
367 unsigned long ip, unsigned long sym_flags)
369 struct file *file = NULL;
370 unsigned long vmstart = 0;
374 const struct vm_area_struct *vma;
/* VMA walk requires the mmap semaphore held for read. */
376 down_read(&mm->mmap_sem);
377 vma = find_vma(mm, ip);
380 vmstart = vma->vm_start;
383 ret = trace_seq_path(s, &file->f_path);
385 ret = trace_seq_printf(s, "[+0x%lx]",
388 up_read(&mm->mmap_sem);
390 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
391 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
/*
 * Print every caller address in a userstack entry, one per line.  With
 * SYM_USEROBJ set, the tgid's mm is looked up so each address can be
 * mapped back to the file it lives in.
 */
396 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
397 unsigned long sym_flags)
399 struct mm_struct *mm = NULL;
403 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
404 struct task_struct *task;
406 * we do the lookup on the thread group leader,
407 * since individual threads might have already quit!
410 task = find_task_by_vpid(entry->tgid);
412 mm = get_task_mm(task);
416 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
417 unsigned long ip = entry->caller[i];
/* ULONG_MAX marks the end of the recorded stack. */
419 if (ip == ULONG_MAX || !ret)
422 ret = trace_seq_puts(s, " => ");
/* No mm available: the address cannot be resolved, print "??". */
425 ret = trace_seq_puts(s, "??");
427 ret = trace_seq_puts(s, "\n");
433 ret = seq_print_user_ip(s, mm, ip, sym_flags);
434 ret = trace_seq_puts(s, "\n");
/*
 * Print a kernel ip as a symbol — with offset when SYM_OFFSET is set —
 * optionally followed by the raw address; ip 0 prints as "0".
 */
443 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
448 return trace_seq_printf(s, "0");
450 if (sym_flags & TRACE_ITER_SYM_OFFSET)
451 ret = seq_print_sym_offset(s, "%s", ip);
453 ret = seq_print_sym_short(s, "%s", ip);
458 if (sym_flags & TRACE_ITER_SYM_ADDR)
459 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
/*
 * Print the latency-format entry prefix: comm-pid, cpu, then single
 * characters for irq state, need-resched, hardirq/softirq context,
 * lock depth and preempt count.
 */
464 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
466 int hardirq, softirq;
467 char comm[TASK_COMM_LEN];
470 trace_find_cmdline(entry->pid, comm);
471 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
472 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
474 if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
475 comm, entry->pid, cpu,
476 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
477 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
479 (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
481 (hardirq && softirq) ? 'H' :
482 hardirq ? 'h' : softirq ? 's' : '.'))
/* Negative lock_depth means no lock held — print '.' instead of a number. */
485 if (entry->lock_depth < 0)
486 ret = trace_seq_putc(s, '.');
488 ret = trace_seq_printf(s, "%d", entry->lock_depth);
492 if (entry->preempt_count)
493 return trace_seq_printf(s, "%x", entry->preempt_count);
494 return trace_seq_putc(s, '.');
/* Relative delays above this many microseconds get flagged with '!'. */
497 static unsigned long preempt_mark_thresh = 100;
/* Print " NNNNus?: " where '?' is '!' / '+' / ' ' by size of the delta. */
500 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
501 unsigned long rel_usecs)
503 return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
504 rel_usecs > preempt_mark_thresh ? '!' :
505 rel_usecs > 1 ? '+' : ' ');
/* Print the default per-entry context: "comm-pid [cpu] secs.usecs: ". */
508 int trace_print_context(struct trace_iterator *iter)
510 struct trace_seq *s = &iter->seq;
511 struct trace_entry *entry = iter->ent;
512 unsigned long long t = ns2usecs(iter->ts);
/* do_div() leaves the quotient in t and returns the remainder. */
513 unsigned long usec_rem = do_div(t, USEC_PER_SEC);
514 unsigned long secs = (unsigned long)t;
515 char comm[TASK_COMM_LEN];
517 trace_find_cmdline(entry->pid, comm);
519 return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
520 comm, entry->pid, iter->cpu, secs, usec_rem);
/*
 * Print the latency-trace context for the current entry.  In verbose
 * mode every raw field is dumped; otherwise the generic prefix plus a
 * relative timestamp (delta to the next entry) is used.
 */
523 int trace_print_lat_context(struct trace_iterator *iter)
527 struct trace_seq *s = &iter->seq;
528 struct trace_entry *entry = iter->ent,
/* Peek at the following entry to compute the (+delta) timestamp. */
529 *next_entry = trace_find_next_entry(iter, NULL,
531 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
532 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
533 unsigned long rel_usecs;
537 rel_usecs = ns2usecs(next_ts - iter->ts);
540 char comm[TASK_COMM_LEN];
542 trace_find_cmdline(entry->pid, comm);
544 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
545 " %ld.%03ldms (+%ld.%03ldms): ", comm,
546 entry->pid, iter->cpu, entry->flags,
547 entry->preempt_count, iter->idx,
549 abs_usecs / USEC_PER_MSEC,
550 abs_usecs % USEC_PER_MSEC,
551 rel_usecs / USEC_PER_MSEC,
552 rel_usecs % USEC_PER_MSEC);
554 ret = lat_print_generic(s, entry, iter->cpu);
556 ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
562 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
/* Map a task state bitmask to its single-letter code, '?' if out of range. */
564 static int task_state_char(unsigned long state)
/* Lowest set bit picks the letter; state 0 (running) maps to index 0. */
566 int bit = state ? __ffs(state) + 1 : 0;
568 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
572 * ftrace_find_event - find a registered event
573 * @type: the type of event to look for
575 * Returns an event of type @type otherwise NULL
576 * Called with trace_event_read_lock() held.
578 struct trace_event *ftrace_find_event(int type)
580 struct trace_event *event;
581 struct hlist_node *n;
/* EVENT_HASHSIZE is a power of two, so masking is a cheap modulo. */
584 key = type & (EVENT_HASHSIZE - 1);
586 hlist_for_each_entry(event, n, &event_hash[key], node) {
587 if (event->type == type)
/* Registered dynamic events, kept sorted by type for gap searching. */
594 static LIST_HEAD(ftrace_event_list);
/*
 * Find a free dynamic event type by scanning the sorted list for a gap
 * left by an unregistered event; *list is set to the insertion point.
 * Called once the simple next_event_type counter is exhausted.
 */
596 static int trace_search_list(struct list_head **list)
598 struct trace_event *e;
599 int last = __TRACE_LAST_TYPE;
601 if (list_empty(&ftrace_event_list)) {
602 *list = &ftrace_event_list;
607 * We used up all possible max events,
608 * lets see if somebody freed one.
610 list_for_each_entry(e, &ftrace_event_list, list) {
/* A jump in type numbers marks a hole left by an unregistered event. */
611 if (e->type != last + 1)
616 /* Did we use up all 65 thousand events??? */
617 if ((last + 1) > FTRACE_MAX_EVENT)
/* Take / drop the shared lock that protects event lookups during output. */
624 void trace_event_read_lock(void)
626 down_read(&trace_event_mutex);
629 void trace_event_read_unlock(void)
631 up_read(&trace_event_mutex);
635 * register_ftrace_event - register output for an event type
636 * @event: the event type to register
638 * Event types are stored in a hash and this hash is used to
639 * find a way to print an event. If the @event->type is set
640 * then it will use that type, otherwise it will assign a
643 * If you assign your own type, please make sure it is added
644 * to the trace_type enum in trace.h, to avoid collisions
645 * with the dynamic types.
647 * Returns the event type number or zero on error.
649 int register_ftrace_event(struct trace_event *event)
654 down_write(&trace_event_mutex);
659 INIT_LIST_HEAD(&event->list);
662 struct list_head *list = NULL;
/* Dynamic id space exhausted: look for a hole freed by unregister. */
664 if (next_event_type > FTRACE_MAX_EVENT) {
666 event->type = trace_search_list(&list);
672 event->type = next_event_type++;
673 list = &ftrace_event_list;
/* A collision here would be a bookkeeping bug — warn loudly. */
676 if (WARN_ON(ftrace_find_event(event->type)))
679 list_add_tail(&event->list, list);
681 } else if (event->type > __TRACE_LAST_TYPE) {
682 printk(KERN_WARNING "Need to add type to trace.h\n");
686 /* Is this event already used */
687 if (ftrace_find_event(event->type))
/* Fill any missing output callbacks with the nop printer. */
691 if (event->trace == NULL)
692 event->trace = trace_nop_print;
693 if (event->raw == NULL)
694 event->raw = trace_nop_print;
695 if (event->hex == NULL)
696 event->hex = trace_nop_print;
697 if (event->binary == NULL)
698 event->binary = trace_nop_print;
700 key = event->type & (EVENT_HASHSIZE - 1);
702 hlist_add_head(&event->node, &event_hash[key]);
706 up_write(&trace_event_mutex);
710 EXPORT_SYMBOL_GPL(register_ftrace_event);
713 * Used by module code with the trace_event_mutex held for write.
/* Unhash and unlist @event; caller holds trace_event_mutex for write. */
715 int __unregister_ftrace_event(struct trace_event *event)
717 hlist_del(&event->node);
718 list_del(&event->list);
723 * unregister_ftrace_event - remove a no longer used event
724 * @event: the event to remove
726 int unregister_ftrace_event(struct trace_event *event)
728 down_write(&trace_event_mutex);
729 __unregister_ftrace_event(event);
730 up_write(&trace_event_mutex);
734 EXPORT_SYMBOL_GPL(unregister_ftrace_event);
/* Default callback for events lacking a printer: emit nothing, claim handled. */
740 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
742 return TRACE_TYPE_HANDLED;
/* TRACE_FN text output: "ip <-parent_ip" with symbol resolution. */
746 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
748 struct ftrace_entry *field;
749 struct trace_seq *s = &iter->seq;
751 trace_assign_type(field, iter->ent);
753 if (!seq_print_ip_sym(s, field->ip, flags))
/* Parent is only printed when the option is set and parent_ip is nonzero. */
756 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
757 if (!trace_seq_printf(s, " <-"))
759 if (!seq_print_ip_sym(s,
764 if (!trace_seq_printf(s, "\n"))
767 return TRACE_TYPE_HANDLED;
770 return TRACE_TYPE_PARTIAL_LINE;
/* TRACE_FN raw output: bare "ip parent_ip" in hex. */
773 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
775 struct ftrace_entry *field;
777 trace_assign_type(field, iter->ent);
779 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
782 return TRACE_TYPE_PARTIAL_LINE;
784 return TRACE_TYPE_HANDLED;
/* TRACE_FN hex output: ip and parent_ip as hex-encoded fields. */
787 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
789 struct ftrace_entry *field;
790 struct trace_seq *s = &iter->seq;
792 trace_assign_type(field, iter->ent);
794 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
795 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
797 return TRACE_TYPE_HANDLED;
/* TRACE_FN binary output: ip and parent_ip as raw binary fields. */
800 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
802 struct ftrace_entry *field;
803 struct trace_seq *s = &iter->seq;
805 trace_assign_type(field, iter->ent);
807 SEQ_PUT_FIELD_RET(s, field->ip);
808 SEQ_PUT_FIELD_RET(s, field->parent_ip);
810 return TRACE_TYPE_HANDLED;
/* Output methods for TRACE_FN entries (raw/hex initializers elided here). */
813 static struct trace_event trace_fn_event = {
815 .trace = trace_fn_trace,
818 .binary = trace_fn_bin,
821 /* TRACE_CTX and TRACE_WAKE */
/* Shared text printer for sched switch/wakeup; @delim distinguishes them. */
822 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
825 struct ctx_switch_entry *field;
826 char comm[TASK_COMM_LEN];
830 trace_assign_type(field, iter->ent);
832 T = task_state_char(field->next_state);
833 S = task_state_char(field->prev_state);
834 trace_find_cmdline(field->next_pid, comm);
835 if (!trace_seq_printf(&iter->seq,
836 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
844 return TRACE_TYPE_PARTIAL_LINE;
846 return TRACE_TYPE_HANDLED;
/* Context switches are joined with "==>", wakeups with " +". */
849 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
851 return trace_ctxwake_print(iter, "==>");
854 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
857 return trace_ctxwake_print(iter, " +");
/* Shared raw printer; a non-zero @S overrides the computed prev-state char. */
860 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
862 struct ctx_switch_entry *field;
865 trace_assign_type(field, iter->ent);
868 task_state_char(field->prev_state);
869 T = task_state_char(field->next_state);
870 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
878 return TRACE_TYPE_PARTIAL_LINE;
880 return TRACE_TYPE_HANDLED;
/* Raw output: ctx entries compute the state char (S==0), wakeups use '+'. */
883 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
885 return trace_ctxwake_raw(iter, 0);
888 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
890 return trace_ctxwake_raw(iter, '+');
/* Shared hex printer; a non-zero @S overrides the computed prev-state char. */
894 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
896 struct ctx_switch_entry *field;
897 struct trace_seq *s = &iter->seq;
900 trace_assign_type(field, iter->ent);
903 task_state_char(field->prev_state);
904 T = task_state_char(field->next_state);
906 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
907 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
908 SEQ_PUT_HEX_FIELD_RET(s, S);
909 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
910 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
911 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
912 SEQ_PUT_HEX_FIELD_RET(s, T);
914 return TRACE_TYPE_HANDLED;
/* Hex output: ctx entries compute the state char (S==0), wakeups use '+'. */
917 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
919 return trace_ctxwake_hex(iter, 0);
922 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
924 return trace_ctxwake_hex(iter, '+');
/* Binary output shared by ctx and wake: raw struct fields, no state char. */
927 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
930 struct ctx_switch_entry *field;
931 struct trace_seq *s = &iter->seq;
933 trace_assign_type(field, iter->ent);
935 SEQ_PUT_FIELD_RET(s, field->prev_pid);
936 SEQ_PUT_FIELD_RET(s, field->prev_prio);
937 SEQ_PUT_FIELD_RET(s, field->prev_state);
938 SEQ_PUT_FIELD_RET(s, field->next_pid);
939 SEQ_PUT_FIELD_RET(s, field->next_prio);
940 SEQ_PUT_FIELD_RET(s, field->next_state);
942 return TRACE_TYPE_HANDLED;
/* Output methods for TRACE_CTX entries. */
945 static struct trace_event trace_ctx_event = {
947 .trace = trace_ctx_print,
948 .raw = trace_ctx_raw,
949 .hex = trace_ctx_hex,
950 .binary = trace_ctxwake_bin,
/* Output methods for TRACE_WAKE entries; the binary handler is shared. */
953 static struct trace_event trace_wake_event = {
955 .trace = trace_wake_print,
956 .raw = trace_wake_raw,
957 .hex = trace_wake_hex,
958 .binary = trace_ctxwake_bin,
/* TRACE_SPECIAL text output: "# arg1 arg2 arg3". */
962 static enum print_line_t trace_special_print(struct trace_iterator *iter,
965 struct special_entry *field;
967 trace_assign_type(field, iter->ent);
969 if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
973 return TRACE_TYPE_PARTIAL_LINE;
975 return TRACE_TYPE_HANDLED;
/* TRACE_SPECIAL hex output: the three args as hex-encoded fields. */
978 static enum print_line_t trace_special_hex(struct trace_iterator *iter,
981 struct special_entry *field;
982 struct trace_seq *s = &iter->seq;
984 trace_assign_type(field, iter->ent);
986 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
987 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
988 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
990 return TRACE_TYPE_HANDLED;
/* TRACE_SPECIAL binary output: the three args as raw binary fields. */
993 static enum print_line_t trace_special_bin(struct trace_iterator *iter,
996 struct special_entry *field;
997 struct trace_seq *s = &iter->seq;
999 trace_assign_type(field, iter->ent);
1001 SEQ_PUT_FIELD_RET(s, field->arg1);
1002 SEQ_PUT_FIELD_RET(s, field->arg2);
1003 SEQ_PUT_FIELD_RET(s, field->arg3);
1005 return TRACE_TYPE_HANDLED;
/* Output methods for TRACE_SPECIAL entries; raw reuses the text printer. */
1008 static struct trace_event trace_special_event = {
1009 .type = TRACE_SPECIAL,
1010 .trace = trace_special_print,
1011 .raw = trace_special_print,
1012 .hex = trace_special_hex,
1013 .binary = trace_special_bin,
/* TRACE_STACK text output: "<stack trace>" then " => symbol" per caller. */
1018 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1021 struct stack_entry *field;
1022 struct trace_seq *s = &iter->seq;
1025 trace_assign_type(field, iter->ent);
1027 if (!trace_seq_puts(s, "<stack trace>\n"))
1029 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
/* A zero or ULONG_MAX slot terminates the recorded stack. */
1030 if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
1032 if (!trace_seq_puts(s, " => "))
1035 if (!seq_print_ip_sym(s, field->caller[i], flags))
1037 if (!trace_seq_puts(s, "\n"))
1041 return TRACE_TYPE_HANDLED;
1044 return TRACE_TYPE_PARTIAL_LINE;
/* Output methods for TRACE_STACK; raw/hex/binary reuse the special handlers. */
1047 static struct trace_event trace_stack_event = {
1048 .type = TRACE_STACK,
1049 .trace = trace_stack_print,
1050 .raw = trace_special_print,
1051 .hex = trace_special_hex,
1052 .binary = trace_special_bin,
1055 /* TRACE_USER_STACK */
/* TRACE_USER_STACK text output: header line plus resolved user addresses. */
1056 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1059 struct userstack_entry *field;
1060 struct trace_seq *s = &iter->seq;
1062 trace_assign_type(field, iter->ent);
1064 if (!trace_seq_puts(s, "<user stack trace>\n"))
1067 if (!seq_print_userip_objs(field, s, flags))
1070 return TRACE_TYPE_HANDLED;
1073 return TRACE_TYPE_PARTIAL_LINE;
/* Output methods for TRACE_USER_STACK; non-text modes reuse special handlers. */
1076 static struct trace_event trace_user_stack_event = {
1077 .type = TRACE_USER_STACK,
1078 .trace = trace_user_stack_print,
1079 .raw = trace_special_print,
1080 .hex = trace_special_hex,
1081 .binary = trace_special_bin,
/* TRACE_BPRINT text output: "symbol: message" with the binary fmt expanded. */
1085 static enum print_line_t
1086 trace_bprint_print(struct trace_iterator *iter, int flags)
1088 struct trace_entry *entry = iter->ent;
1089 struct trace_seq *s = &iter->seq;
1090 struct bprint_entry *field;
1092 trace_assign_type(field, entry);
1094 if (!seq_print_ip_sym(s, field->ip, flags))
1097 if (!trace_seq_puts(s, ": "))
1100 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1103 return TRACE_TYPE_HANDLED;
1106 return TRACE_TYPE_PARTIAL_LINE;
/* TRACE_BPRINT raw output: ": ip : message" without symbol resolution. */
1110 static enum print_line_t
1111 trace_bprint_raw(struct trace_iterator *iter, int flags)
1113 struct bprint_entry *field;
1114 struct trace_seq *s = &iter->seq;
1116 trace_assign_type(field, iter->ent);
1118 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1121 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1124 return TRACE_TYPE_HANDLED;
1127 return TRACE_TYPE_PARTIAL_LINE;
/* Output methods for TRACE_BPRINT entries (hex/binary not set here). */
1131 static struct trace_event trace_bprint_event = {
1132 .type = TRACE_BPRINT,
1133 .trace = trace_bprint_print,
1134 .raw = trace_bprint_raw,
/* TRACE_PRINT text output: "symbol: message". */
1138 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1141 struct print_entry *field;
1142 struct trace_seq *s = &iter->seq;
1144 trace_assign_type(field, iter->ent);
1146 if (!seq_print_ip_sym(s, field->ip, flags))
1149 if (!trace_seq_printf(s, ": %s", field->buf))
1152 return TRACE_TYPE_HANDLED;
1155 return TRACE_TYPE_PARTIAL_LINE;
/* TRACE_PRINT raw output: "# ip message" without symbol resolution. */
1158 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1160 struct print_entry *field;
1162 trace_assign_type(field, iter->ent);
1164 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1167 return TRACE_TYPE_HANDLED;
1170 return TRACE_TYPE_PARTIAL_LINE;
/* Output methods for TRACE_PRINT entries (hex/binary not set here). */
1173 static struct trace_event trace_print_event = {
1174 .type = TRACE_PRINT,
1175 .trace = trace_print_print,
1176 .raw = trace_print_raw,
/* Built-in events registered at boot; __initdata — discarded after init. */
1180 static struct trace_event *events[] __initdata = {
1184 &trace_special_event,
1186 &trace_user_stack_event,
1187 &trace_bprint_event,
/* Register every built-in event at boot, warning on any that fails. */
1192 __init static int init_events(void)
1194 struct trace_event *event;
1197 for (i = 0; events[i]; i++) {
1200 ret = register_ftrace_event(event);
1202 printk(KERN_WARNING "event %d failed to register\n",
1210 device_initcall(init_events);