#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/trace_seq.h>
#include <linux/ring_buffer.h>
#include <linux/percpu.h>

struct trace_array;
struct tracer;
struct dentry;

DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);
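
/*
 * Usage sketch (not part of this header): decoding a flag word into
 * delimiter-separated names.  The flag table below is hypothetical; real
 * tables are typically supplied through the event macros.
 *
 *	static const struct trace_print_flags foo_flag_names[] = {
 *		{ 0x01,	"FOO_PENDING" },
 *		{ 0x02,	"FOO_ACTIVE" },
 *		{ 0,	NULL }
 *	};
 *
 *	const char *str = ftrace_print_flags_seq(p, "|", flags,
 *						 foo_flag_names);
 */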

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
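
/*
 * With ->type declared as an unsigned short above, this works out to
 * (1 << 16) - 1 = 65535 distinct event type ids.
 */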

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
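
/*
 * Sketch of a minimal output callback and its registration; the foo_*
 * names are hypothetical (real events normally come from TRACE_EVENT()):
 *
 *	static enum print_line_t foo_output(struct trace_iterator *iter,
 *					    int flags)
 *	{
 *		if (!trace_seq_printf(&iter->seq, "foo fired\n"))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event foo_event = {
 *		.trace	= foo_output,
 *	};
 *
 *	register_ftrace_event(&foo_event);  (assigns foo_event.type an id)
 */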

struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
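
/*
 * Typical reserve/fill/commit cycle (sketch; event_foo_id and struct
 * foo_entry are hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	struct foo_entry *entry;
 *
 *	event = trace_current_buffer_lock_reserve(event_foo_id,
 *						  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = value;
 *	trace_current_buffer_unlock_commit(event, flags, pc);
 */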

void tracing_record_cmdline(struct task_struct *tsk);

struct ftrace_event_call {
	struct list_head	list;
	char			*name;
	char			*system;
	struct dentry		*dir;
	struct trace_event	*event;
	int			enabled;
	int			(*regfunc)(void);
	void			(*unregfunc)(void);
	int			id;
	int			(*raw_init)(void);
	int			(*show_format)(struct trace_seq *s);
	int			(*define_fields)(void);
	struct list_head	fields;
	int			filter_active;
	void			*filter;
	void			*mod;

#ifdef CONFIG_EVENT_PROFILE
	atomic_t	profile_count;
	int		(*profile_enable)(struct ftrace_event_call *);
	void		(*profile_disable)(struct ftrace_event_call *);
#endif
};
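
/*
 * One of these is normally generated per event by the TRACE_EVENT()
 * machinery; a hand-rolled equivalent would look roughly like this
 * (all foo_* names hypothetical):
 *
 *	static struct ftrace_event_call foo_call = {
 *		.name		= "foo",
 *		.system		= "foo_subsys",
 *		.regfunc	= foo_reg,
 *		.unregfunc	= foo_unreg,
 *		.raw_init	= foo_raw_init,
 *	};
 */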

#define MAX_FILTER_PRED		32
#define MAX_FILTER_STR_VAL	128

extern int init_preds(struct ftrace_event_call *call);
extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern int filter_current_check_discard(struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
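
/*
 * In generated event code the filter check sits between filling in the
 * record and committing it, roughly (sketch):
 *
 *	if (!filter_current_check_discard(call, entry, event))
 *		trace_nowake_buffer_unlock_commit(event, flags, pc);
 */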

extern int trace_define_field(struct ftrace_event_call *call, char *type,
			      char *name, int offset, int size, int is_signed);

#define is_signed_type(type)	(((type)(-1)) < 0)
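
/*
 * is_signed_type() casts -1 to the type: the result is negative for
 * signed types, so is_signed_type(int) is 1 and is_signed_type(u32) is 0.
 * Sketch of registering one field with it (foo_* names hypothetical,
 * "call" and "ret" assumed in scope):
 *
 *	ret = trace_define_field(call, "int", "foo_value",
 *				 offsetof(struct foo_entry, foo_value),
 *				 sizeof(int), is_signed_type(int));
 */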

int trace_set_clr_event(const char *system, const char *event, int set);
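
/*
 * Kernel-internal equivalent of writing to the debugfs set_event file;
 * e.g. enabling one event (sketch, the names are just an example pair):
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 */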

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign a non-constant fmt to the static variable, even
 * though the outer if statement would be optimized out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		__attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
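
/*
 * Usage sketch: the caller passes its instruction pointer so the output
 * can be attributed to the call site, e.g.:
 *
 *	event_trace_printk(_THIS_IP_, "count=%d\n", count);
 */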

#define __common_field(type, item, is_signed)				\
	ret = trace_define_field(event_call, #type, "common_" #item,	\
				 offsetof(typeof(field.ent), item),	\
				 sizeof(field.ent.item), is_signed);	\
	if (ret)							\
		return ret;
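
/*
 * Used when registering the fields shared by every event; the caller is
 * expected to declare "ret", "event_call" and "field" locally, e.g.:
 *
 *	__common_field(unsigned char, flags, 0);
 *	__common_field(unsigned char, preempt_count, 0);
 *	__common_field(int, pid, 1);
 */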

#endif /* _LINUX_FTRACE_EVENT_H */