2 * Stage 1 of the trace events.
4 * Override the macros in <trace/trace_events.h> to include the following:
6 * struct ftrace_raw_<call> {
7 * struct trace_entry ent;
9 * <type2> <item2>[<len>];
13 * The <type> <item> is created by the __field(type, item) macro or
14 * the __array(type2, item2, len) macro.
15 * We simply do "type item;", and that will create the fields
19 #include <linux/ftrace_event.h>
/*
 * Stage 1 field macros: each one emits a struct-member declaration so
 * that TP_STRUCT__entry() expands into the body of struct ftrace_raw_<call>.
 */
#undef __array
#define __array(type, item, len) type item[len];

#undef __field
#define __field(type, item) type item;

/*
 * Strings are not stored inline in the fixed part of the record; only a
 * 16-bit offset to the string data placed after the fixed-size part of
 * the entry is recorded here.
 */
#undef __string
#define __string(item, src) unsigned short __str_loc_##item;

/* TP_STRUCT__entry() passes its field list through unchanged. */
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
34 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
35 struct ftrace_raw_##name { \
36 struct trace_entry ent; \
40 static struct ftrace_event_call event_##name
42 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
46 * Stage 2 of the trace events.
48 * Include the following:
50 * struct ftrace_str_offsets_<call> {
56 * The __string() macro will create each int <str>, this is to
57 * keep the offset of each string from the beginning of the event
58 * once we perform the strlen() of the src strings.
/*
 * Stage 2 field macros: only __string() contributes a member to
 * struct ftrace_str_offsets_<call>; fixed-size fields need no offset
 * bookkeeping, so __field() and __array() must expand to nothing.
 */
#undef __array
#define __array(type, item, len)

/*
 * BUGFIX: this previously read "#define __field(type, item);" -- the
 * stray semicolon became the macro's expansion and left empty
 * declarations inside every generated offsets struct (not valid ISO C).
 */
#undef __field
#define __field(type, item)

#undef __string
#define __string(item, src) int item;
72 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
73 struct ftrace_str_offsets_##call { \
77 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
80 * Stage 3 of the trace events.
82 * Override the macros in <trace/trace_events.h> to include the following:
85 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
87 * struct trace_seq *s = &iter->seq;
88 * struct ftrace_raw_<call> *field; <-- defined in stage 1
89 * struct trace_entry *entry;
90 * struct trace_seq *p;
95 * if (entry->type != event_<call>.id) {
97 * return TRACE_TYPE_UNHANDLED;
100 * field = (typeof(field))entry;
102 * p = get_cpu_var(ftrace_event_seq);
103 * ret = trace_seq_printf(s, <TP_printk> "\n");
106 * return TRACE_TYPE_PARTIAL_LINE;
108 * return TRACE_TYPE_HANDLED;
111 * This is the method used to print the raw event to the trace
112 * output format. Note, this is not needed if the data is read
/* In TP_printk() output, __entry refers to the decoded raw record. */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/*
 * Resolve a __string() field: the entry stores only a byte offset
 * (__str_loc_<field>) from the start of the record to the string data.
 */
#undef __get_str
#define __get_str(field) ((char *)__entry + __entry->__str_loc_##field)
/*
 * Render a flags word as a delim-separated list of symbolic names at
 * print time; the { -1, NULL } entry terminates the lookup table.
 * NOTE(review): the "({" / "})" statement-expression delimiters did not
 * survive extraction and have been restored -- confirm against the
 * upstream header.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})
/*
 * Map a scalar value to a single symbolic name at print time; the
 * { -1, NULL } entry terminates the lookup table.
 * NOTE(review): the "({" / "})" statement-expression delimiters did not
 * survive extraction and have been restored -- confirm against the
 * upstream header.
 */
#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
142 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
144 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
146 struct trace_seq *s = &iter->seq; \
147 struct ftrace_raw_##call *field; \
148 struct trace_entry *entry; \
149 struct trace_seq *p; \
154 if (entry->type != event_##call.id) { \
156 return TRACE_TYPE_UNHANDLED; \
159 field = (typeof(field))entry; \
161 p = &get_cpu_var(ftrace_event_seq); \
162 ret = trace_seq_printf(s, #call ": " print); \
165 return TRACE_TYPE_PARTIAL_LINE; \
167 return TRACE_TYPE_HANDLED; \
170 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
173 * Setup the showing format of trace point.
176 * ftrace_format_##call(struct trace_seq *s)
178 * struct ftrace_raw_##call field;
181 * ret = trace_seq_printf(s, #type " " #item ";"
182 * " offset:%u; size:%u;\n",
183 * offsetof(struct ftrace_raw_##call, item),
184 * sizeof(field.type));
/* TP_STRUCT__entry() again passes the field list straight through. */
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
/*
 * Emit one "field:<type> <item>; offset:<o>; size:<s>;" description line
 * for a fixed-size field into the event's format file.
 * NOTE(review): the trailing "if (!ret) return 0;" bail-out was lost in
 * the mangled original and has been restored -- confirm upstream.
 */
#undef __field
#define __field(type, item)					       \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"     \
			       "offset:%u;\tsize:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));      \
	if (!ret)						       \
		return 0;
/*
 * Same as __field() above, but the description carries the array
 * length in the type name: "field:<type> <item>[<len>]; ...".
 * NOTE(review): the trailing "if (!ret) return 0;" bail-out was lost in
 * the mangled original and has been restored -- confirm upstream.
 */
#undef __array
#define __array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));      \
	if (!ret)						       \
		return 0;
/*
 * Describe a __string() field in the format file: what lives in the
 * record is the unsigned short offset member, not the characters.
 * BUGFIX: the format string read "offset:%u;tsize:%u;" -- the tab
 * escape was missing, so the size field came out as literal "tsize".
 * NOTE(review): the "__str_loc_##item)," continuation line and the
 * trailing "if (!ret) return 0;" were lost in the mangled original and
 * have been restored -- confirm upstream.
 */
#undef __string
#define __string(item, src)					       \
	ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t"    \
			       "offset:%u;\tsize:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field),   \
					__str_loc_##item),	       \
			       (unsigned int)sizeof(field.__str_loc_##item)); \
	if (!ret)						       \
		return 0;
/*
 * For the format file, show the TP_printk() arguments symbolically:
 * the format string itself plus the stringified argument list.
 */
#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

/* TP_fast_assign() runs the per-field assignment statements verbatim. */
#undef TP_fast_assign
#define TP_fast_assign(args...) args
230 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
232 ftrace_format_##call(struct trace_seq *s) \
234 struct ftrace_raw_##call field __attribute__((unused)); \
239 trace_seq_printf(s, "\nprint fmt: " print); \
244 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Register a fixed-size field with the event-filter core, including
 * its signedness so comparisons work correctly.
 * NOTE(review): the trailing "if (ret) return ret;" was lost in the
 * mangled original and has been restored -- confirm upstream.
 */
#undef __field
#define __field(type, item)					       \
	ret = trace_define_field(event_call, #type, #item,	       \
				 offsetof(typeof(field), item),	       \
				 sizeof(field.item), is_signed_type(type)); \
	if (ret)						       \
		return ret;
/*
 * Register an array field.  Arrays are treated as strings by the filter
 * code, hence the compile-time cap at MAX_FILTER_STR_VAL; signedness is
 * not tracked for arrays (final 0 argument).
 * NOTE(review): the trailing "if (ret) return ret;" was lost in the
 * mangled original and has been restored -- confirm upstream.
 */
#undef __array
#define __array(type, item, len)				       \
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			       \
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),	       \
				 sizeof(field.item), 0);	       \
	if (ret)						       \
		return ret;
/*
 * Register a __string() field: the filterable object is the unsigned
 * short offset member, exposed under the pseudo-type "__str_loc".
 */
#undef __string
#define __string(item, src)					       \
	ret = trace_define_field(event_call, "__str_loc", #item,       \
				 offsetof(typeof(field), __str_loc_##item), \
				 sizeof(field.__str_loc_##item), 0);
270 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
272 ftrace_define_fields_##call(void) \
274 struct ftrace_raw_##call field; \
275 struct ftrace_event_call *event_call = &event_##call; \
278 __common_field(int, type, 1); \
279 __common_field(unsigned char, flags, 0); \
280 __common_field(unsigned char, preempt_count, 0); \
281 __common_field(int, pid, 1); \
282 __common_field(int, tgid, 1); \
289 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
292 * Stage 4 of the trace events.
294 * Override the macros in <trace/trace_events.h> to include the following:
296 * static void ftrace_event_<call>(proto)
298 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
301 * static int ftrace_reg_event_<call>(void)
305 * ret = register_trace_<call>(ftrace_event_<call>);
307 * pr_info("event trace: Could not activate trace point "
308 * "probe to <call>");
312 * static void ftrace_unreg_event_<call>(void)
314 * unregister_trace_<call>(ftrace_event_<call>);
318 * For those macros defined with TRACE_EVENT:
320 * static struct ftrace_event_call event_<call>;
322 * static void ftrace_raw_event_<call>(proto)
324 * struct ring_buffer_event *event;
325 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
326 * unsigned long irq_flags;
329 * local_save_flags(irq_flags);
330 * pc = preempt_count();
332 * event = trace_current_buffer_lock_reserve(event_<call>.id,
333 * sizeof(struct ftrace_raw_<call>),
337 * entry = ring_buffer_event_data(event);
339 * <assign>; <-- Here we assign the entries by the __field and
342 * trace_current_buffer_unlock_commit(event, irq_flags, pc);
345 * static int ftrace_raw_reg_event_<call>(void)
349 * ret = register_trace_<call>(ftrace_raw_event_<call>);
351 * pr_info("event trace: Could not activate trace point "
352 * "probe to <call>");
356 * static void ftrace_raw_unreg_event_<call>(void)
358 * unregister_trace_<call>(ftrace_raw_event_<call>);
361 * static struct trace_event ftrace_event_type_<call> = {
362 * .trace = ftrace_raw_output_<call>, <-- stage 3
365 * static int ftrace_raw_init_event_<call>(void)
369 * id = register_ftrace_event(&ftrace_event_type_<call>);
372 * event_<call>.id = id;
376 * static struct ftrace_event_call __used
377 * __attribute__((__aligned__(4)))
378 * __attribute__((section("_ftrace_events"))) event_<call> = {
380 * .system = "<system>",
381 * .raw_init = ftrace_raw_init_event_<call>,
382 * .regfunc = ftrace_reg_event_<call>,
383 * .unregfunc = ftrace_unreg_event_<call>,
384 * .show_format = ftrace_format_<call>,
/* Glue a newline onto fmt; "##" drops the comma when args is empty. */
#undef TP_FMT
#define TP_FMT(fmt, args...) fmt "\n", ##args
392 #ifdef CONFIG_EVENT_PROFILE
393 #define _TRACE_PROFILE(call, proto, args) \
394 static void ftrace_profile_##call(proto) \
396 extern void perf_tpcounter_event(int); \
397 perf_tpcounter_event(event_##call.id); \
400 static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
404 if (!atomic_inc_return(&event_call->profile_count)) \
405 ret = register_trace_##call(ftrace_profile_##call); \
410 static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
412 if (atomic_add_negative(-1, &event_call->profile_count)) \
413 unregister_trace_##call(ftrace_profile_##call); \
416 #define _TRACE_PROFILE_INIT(call) \
417 .profile_count = ATOMIC_INIT(-1), \
418 .profile_enable = ftrace_profile_enable_##call, \
419 .profile_disable = ftrace_profile_disable_##call,
/*
 * NOTE(review): these two stubs look like the !CONFIG_EVENT_PROFILE
 * branch; the #else/#endif of the CONFIG_EVENT_PROFILE conditional
 * opened above did not survive extraction -- confirm against upstream.
 */
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)

/* In TP_fast_assign(), __entry names the freshly reserved ring-buffer record. */
#undef __entry
#define __entry entry

/* Fixed-size fields need no work in the string-offset prologue. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)
/*
 * Pre-pass over the fields: record where each string will land (offset
 * from the start of the entry, after the fixed-size part at __str_data)
 * and grow __str_size by strlen(src) + 1 so the ring-buffer reservation
 * covers the copies made later by __assign_str().
 */
#undef __string
#define __string(item, src)					       \
	__str_offsets.item = __str_size +			       \
			     offsetof(typeof(*entry), __str_data);     \
	__str_size += strlen(src) + 1;
/*
 * Store the offset precomputed by __string() into the entry, then copy
 * the source string into its slot via __get_str().
 */
#undef __assign_str
#define __assign_str(dst, src)					       \
	__entry->__str_loc_##dst = __str_offsets.dst;		       \
	strcpy(__get_str(dst), src);
447 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
448 _TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
450 static struct ftrace_event_call event_##call; \
452 static void ftrace_raw_event_##call(proto) \
454 struct ftrace_str_offsets_##call __maybe_unused __str_offsets; \
455 struct ftrace_event_call *event_call = &event_##call; \
456 struct ring_buffer_event *event; \
457 struct ftrace_raw_##call *entry; \
458 unsigned long irq_flags; \
459 int __str_size = 0; \
462 local_save_flags(irq_flags); \
463 pc = preempt_count(); \
467 event = trace_current_buffer_lock_reserve(event_##call.id, \
468 sizeof(struct ftrace_raw_##call) + __str_size,\
472 entry = ring_buffer_event_data(event); \
476 if (!filter_current_check_discard(event_call, entry, event)) \
477 trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
480 static int ftrace_raw_reg_event_##call(void) \
484 ret = register_trace_##call(ftrace_raw_event_##call); \
486 pr_info("event trace: Could not activate trace point " \
487 "probe to " #call "\n"); \
491 static void ftrace_raw_unreg_event_##call(void) \
493 unregister_trace_##call(ftrace_raw_event_##call); \
496 static struct trace_event ftrace_event_type_##call = { \
497 .trace = ftrace_raw_output_##call, \
500 static int ftrace_raw_init_event_##call(void) \
504 id = register_ftrace_event(&ftrace_event_type_##call); \
507 event_##call.id = id; \
508 INIT_LIST_HEAD(&event_##call.fields); \
509 init_preds(&event_##call); \
513 static struct ftrace_event_call __used \
514 __attribute__((__aligned__(4))) \
515 __attribute__((section("_ftrace_events"))) event_##call = { \
517 .system = __stringify(TRACE_SYSTEM), \
518 .event = &ftrace_event_type_##call, \
519 .raw_init = ftrace_raw_init_event_##call, \
520 .regfunc = ftrace_raw_reg_event_##call, \
521 .unregfunc = ftrace_raw_unreg_event_##call, \
522 .show_format = ftrace_format_##call, \
523 .define_fields = ftrace_define_fields_##call, \
524 _TRACE_PROFILE_INIT(call) \
527 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
529 #undef _TRACE_PROFILE
530 #undef _TRACE_PROFILE_INIT