2 * Stage 1 of the trace events.
4 * Override the macros in <trace/trace_events.h> to include the following:
6 * struct ftrace_raw_<call> {
7 * struct trace_entry ent;
9 * <type2> <item2>[<len>];
13 * The <type> <item> is created by the __field(type, item) macro or
14 * the __array(type2, item2, len) macro.
15 * We simply do "type item;", and that will create the fields
19 #include <linux/ftrace_event.h>
/*
 * Stage 1: TRACE_FORMAT events carry no structured payload, so the
 * macro expands to nothing at this stage.
 */
#define TRACE_FORMAT(call, proto, args, fmt)

/* An __array(type, item, len) becomes an array member of the raw struct. */
#define __array(type, item, len)	type	item[len];

/* A __field(type, item) becomes a plain member of the raw struct. */
#define __field(type, item)		type	item;

/*
 * A __string()'s bytes live out of line; only the int offset of the
 * string within the event record is stored in the struct.
 */
#define __string(item, src)		int	__str_loc_##item;

#undef TP_STRUCT__entry
/* TP_STRUCT__entry() simply pastes the field declarations it was given. */
#define TP_STRUCT__entry(args...)	args

/*
 * Emit "struct ftrace_raw_<name>": the common trace_entry header
 * followed by the event's own fields, plus a forward declaration of
 * the event's ftrace_event_call.
 * NOTE(review): this listing looks truncated -- the tstruct expansion
 * and the closing "};" of the struct are not visible here.
 */
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
	static struct ftrace_event_call event_##name
45 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
49 * Stage 2 of the trace events.
51 * Include the following:
53 * struct ftrace_str_offsets_<call> {
59 * The __string() macro will create each int <str>, this is to
60 * keep the offset of each string from the beginning of the event
61 * once we perform the strlen() of the src strings.
/*
 * Stage 2: only __string() fields matter for the offsets struct;
 * TRACE_FORMAT and arrays contribute nothing.
 */
#define TRACE_FORMAT(call, proto, args, fmt)

/* Arrays live inline in the raw struct; no offset bookkeeping needed. */
#define __array(type, item, len)
72 #define __field(type, item);
/* Each __string() occupies one int slot in the offsets struct. */
#define __string(item, src)	int	item;

/*
 * Declare "struct ftrace_str_offsets_<call>" holding one int per
 * __string() in the event, used later to place each string past the
 * fixed-size record.
 * NOTE(review): the tstruct expansion and the closing "};" appear to
 * be missing from this listing.
 */
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_str_offsets_##call {			\
83 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
86 * Stage 3 of the trace events.
88 * Override the macros in <trace/trace_events.h> to include the following:
91 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
93 * struct trace_seq *s = &iter->seq;
94 * struct ftrace_raw_<call> *field; <-- defined in stage 1
95 * struct trace_entry *entry;
100 * if (entry->type != event_<call>.id) {
102 * return TRACE_TYPE_UNHANDLED;
105 * field = (typeof(field))entry;
107 * ret = trace_seq_printf(s, <TP_printk> "\n");
109 * return TRACE_TYPE_PARTIAL_LINE;
111 * return TRACE_TYPE_HANDLED;
114 * This is the method used to print the raw event to the trace
115 * output format. Note, this is not needed if the data is read
/* Inside TP_printk() the raw record is reached through "field". */
#define __entry field

/* TP_printk() supplies the format string and arguments; "\n" is appended. */
#define TP_printk(fmt, args...) fmt "\n", args

/* Resolve a __string() field to its address inside the event record. */
#define __get_str(field) ((char *)__entry + __entry->__str_loc_##field)

/*
 * Emit ftrace_raw_output_<call>(): check the entry's type id against
 * this event's registered id, cast the generic trace_entry to the
 * event's raw struct, and print it using the TP_printk() format.
 * NOTE(review): several lines (function return type, braces, the
 * entry assignment and the "if (!ret)" guard) are not visible in this
 * listing -- it appears truncated.
 */
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
	struct trace_seq *s = &iter->seq;			\
	struct ftrace_raw_##call *field;			\
	struct trace_entry *entry;				\
	if (entry->type != event_##call.id) {			\
		return TRACE_TYPE_UNHANDLED;			\
	field = (typeof(field))entry;				\
	ret = trace_seq_printf(s, #call ": " print);		\
		return TRACE_TYPE_PARTIAL_LINE;			\
	return TRACE_TYPE_HANDLED;				\
154 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
157 * Setup the showing format of trace point.
160 * ftrace_format_##call(struct trace_seq *s)
162 * struct ftrace_raw_##call field;
165 * ret = trace_seq_printf(s, #type " " #item ";"
166 * " offset:%u; size:%u;\n",
167 * offsetof(struct ftrace_raw_##call, item),
168 * sizeof(field.type));
#undef TP_STRUCT__entry
/* Again paste the field declarations so each field macro below fires. */
#define TP_STRUCT__entry(args...) args

/*
 * Describe a plain field in the textual "format" file: name, byte
 * offset and size within struct ftrace_raw_<call>.
 * NOTE(review): the usual "if (!ret) return 0;" guard is not visible
 * after these macros -- the listing appears truncated.
 */
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\

/* Same as __field(), but the length becomes part of the type string. */
#define __array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
/*
 * Describe a __string() field's offset slot in the "format" file.
 * The separator before "size" must be a tab: the old format string
 * printed "offset:%u;tsize:%u;" (missing the backslash in "\t"),
 * which breaks userspace tools that parse the per-field
 * "offset:...;\tsize:...;" layout of the format file.
 * The offsetof() argument "__str_loc_##item" matches the sibling
 * trace_define_field() variant of __string() later in this file.
 * NOTE(review): the trailing "if (!ret) return 0;" guard is not
 * visible in this listing.
 */
#define __string(item, src)					\
	ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field),	\
					__str_loc_##item),		\
			       (unsigned int)sizeof(field.__str_loc_##item)); \
/* For the format file, print the fmt and args stringified, not evaluated. */
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
/* TP_fast_assign() pastes the assignment statements verbatim. */
#define TP_fast_assign(args...) args

/*
 * Emit ftrace_format_##call(): walk a dummy raw struct to print every
 * field description, then the "print fmt:" line.
 * NOTE(review): the function's return type, braces and return value
 * are not visible in this listing.
 */
#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
ftrace_format_##call(struct trace_seq *s)			\
	struct ftrace_raw_##call field __attribute__((unused));	\
	trace_seq_printf(s, "\nprint fmt: " print);		\
228 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Register a plain field with the event-filter code: type string,
 * name, offset and size.
 * NOTE(review): the "if (ret) return ret;" guards usual after these
 * calls are not visible -- the listing appears truncated.
 */
#define __field(type, item)					\
	ret = trace_define_field(event_call, #type, #item,	\
				 offsetof(typeof(field), item),	\
				 sizeof(field.item));		\

/*
 * Arrays register as "type[len]".  Filter string operands are bounded,
 * hence the build-time check against MAX_FILTER_STR_VAL.
 */
#define __array(type, item, len)				\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),	\
				 sizeof(field.item));		\

/* Strings register their int offset slot under the pseudo-type "__str_loc". */
#define __string(item, src)					\
	ret = trace_define_field(event_call, "__str_loc", #item, \
				 offsetof(typeof(field), __str_loc_##item), \
				 sizeof(field.__str_loc_##item));

/*
 * Emit ftrace_define_fields_<call>(): register the fields shared by
 * every event (the trace_entry header members), then the event's own
 * fields via the macros above.
 * NOTE(review): function braces, the tstruct expansion and the return
 * are not visible in this listing.
 */
#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
ftrace_define_fields_##call(void)				\
	struct ftrace_raw_##call field;				\
	struct ftrace_event_call *event_call = &event_##call;	\
	__common_field(int, type);				\
	__common_field(unsigned char, flags);			\
	__common_field(unsigned char, preempt_count);		\
	__common_field(int, pid);				\
	__common_field(int, tgid);				\
273 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
276 * Stage 4 of the trace events.
278 * Override the macros in <trace/trace_events.h> to include the following:
280 * static void ftrace_event_<call>(proto)
282 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
285 * static int ftrace_reg_event_<call>(void)
289 * ret = register_trace_<call>(ftrace_event_<call>);
291 * pr_info("event trace: Could not activate trace point "
292 * "probe to <call>");
296 * static void ftrace_unreg_event_<call>(void)
298 * unregister_trace_<call>(ftrace_event_<call>);
301 * For those macros defined with TRACE_FORMAT:
303 * static struct ftrace_event_call __used
304 * __attribute__((__aligned__(4)))
305 * __attribute__((section("_ftrace_events"))) event_<call> = {
307 * .regfunc = ftrace_reg_event_<call>,
308 * .unregfunc = ftrace_unreg_event_<call>,
312 * For those macros defined with TRACE_EVENT:
314 * static struct ftrace_event_call event_<call>;
316 * static void ftrace_raw_event_<call>(proto)
318 * struct ring_buffer_event *event;
319 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
320 * unsigned long irq_flags;
323 * local_save_flags(irq_flags);
324 * pc = preempt_count();
326 * event = trace_current_buffer_lock_reserve(event_<call>.id,
327 * sizeof(struct ftrace_raw_<call>),
331 * entry = ring_buffer_event_data(event);
333 * <assign>; <-- Here we assign the entries by the __field and
336 * trace_current_buffer_unlock_commit(event, irq_flags, pc);
339 * static int ftrace_raw_reg_event_<call>(void)
343 * ret = register_trace_<call>(ftrace_raw_event_<call>);
345 * pr_info("event trace: Could not activate trace point "
346 * "probe to <call>");
350 * static void ftrace_unreg_event_<call>(void)
352 * unregister_trace_<call>(ftrace_raw_event_<call>);
355 * static struct trace_event ftrace_event_type_<call> = {
356 * .trace = ftrace_raw_output_<call>, <-- stage 2
359 * static int ftrace_raw_init_event_<call>(void)
363 * id = register_ftrace_event(&ftrace_event_type_<call>);
366 * event_<call>.id = id;
370 * static struct ftrace_event_call __used
371 * __attribute__((__aligned__(4)))
372 * __attribute__((section("_ftrace_events"))) event_<call> = {
374 * .system = "<system>",
375 * .raw_init = ftrace_raw_init_event_<call>,
376 * .regfunc = ftrace_reg_event_<call>,
377 * .unregfunc = ftrace_unreg_event_<call>,
378 * .show_format = ftrace_format_<call>,
/* TP_FMT appends a newline; "##" swallows the comma when args is empty. */
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
/*
 * Profiling hooks: ftrace_profile_<call>() feeds the perf counter with
 * the event id; the enable/disable helpers (un)register it as a
 * tracepoint probe on the -1 <-> 0 transition of profile_count
 * (initialized to -1 by _TRACE_PROFILE_INIT below).
 * NOTE(review): function braces, "int ret;" declarations and returns
 * are not visible -- the listing appears truncated.
 */
#define _TRACE_PROFILE(call, proto, args)			\
static void ftrace_profile_##call(proto)			\
	extern void perf_tpcounter_event(int);			\
	perf_tpcounter_event(event_##call.id);			\
static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
	if (!atomic_inc_return(&call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call); \
static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
	if (atomic_add_negative(-1, &call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);	\

/* Initializer fragment spliced into each event's ftrace_event_call. */
#define _TRACE_PROFILE_INIT(call)				\
	.profile_count = ATOMIC_INIT(-1),			\
	.profile_enable = ftrace_profile_enable_##call,		\
	.profile_disable = ftrace_profile_disable_##call,

/*
 * Without CONFIG_EVENT_PROFILE both helpers compile away.
 * NOTE(review): the "#else" (and later "#endif") directives are not
 * visible in this listing -- presumably dropped by truncation.
 */
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
/*
 * TRACE_FORMAT events: the probe simply printk's into the trace buffer
 * via event_trace_printk(), with register/unregister helpers and an
 * init routine that only obtains an event id (NULL: no output handler).
 * NOTE(review): function braces, "int ret;"/"int id;" declarations,
 * returns and the failure-path "if (ret)" tests are not visible in
 * this listing.
 */
#define _TRACE_FORMAT(call, proto, args, fmt)			\
static void ftrace_event_##call(proto)				\
	event_trace_printk(_RET_IP_, #call ": " fmt);		\
static int ftrace_reg_event_##call(void)			\
	ret = register_trace_##call(ftrace_event_##call);	\
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n");		\
static void ftrace_unreg_event_##call(void)			\
	unregister_trace_##call(ftrace_event_##call);		\
static struct ftrace_event_call event_##call;			\
static int ftrace_init_event_##call(void)			\
	id = register_ftrace_event(NULL);			\
	event_##call.id = id;					\

/*
 * Instantiate the ftrace_event_call for a TRACE_FORMAT event and place
 * it in the "_ftrace_events" section so the core can iterate all
 * events at boot.
 * NOTE(review): the ".name" initializer and closing "};" are not
 * visible in this listing.
 */
#define TRACE_FORMAT(call, proto, args, fmt)			\
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt))	\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))		\
static struct ftrace_event_call __used				\
__attribute__((__aligned__(4)))					\
__attribute__((section("_ftrace_events"))) event_##call = {	\
	.system = __stringify(TRACE_SYSTEM),			\
	.raw_init = ftrace_init_event_##call,			\
	.regfunc = ftrace_reg_event_##call,			\
	.unregfunc = ftrace_unreg_event_##call,			\
	_TRACE_PROFILE_INIT(call)				\
/* In TP_fast_assign() the record being filled is reached via "entry". */
#define __entry entry

/* Plain fields and arrays need no extra bookkeeping at assign time. */
#define __field(type, item)
#define __array(type, item, len)

/*
 * Record where this string will live (past the fixed-size struct) and
 * grow the variable-length tail by the NUL-terminated string length.
 * NOTE(review): __str_data is presumably the flexible tail member of
 * the raw struct -- its declaration is not visible in this listing.
 */
#define __string(item, src)					\
	__str_offsets.item = __str_size +			\
			     offsetof(typeof(*entry), __str_data); \
	__str_size += strlen(src) + 1;

/* Store the string's offset in the record, then copy the bytes in. */
#define __assign_str(dst, src)					\
	__entry->__str_loc_##dst = __str_offsets.dst;		\
	strcpy(__get_str(dst), src);
/*
 * The heart of stage 4.  ftrace_raw_event_<call>() reserves ring-buffer
 * space for the fixed raw struct plus the accumulated __string() sizes,
 * fills the record (TP_fast_assign()/__assign_str()), and commits it
 * unless the event filter discards it.  Around the probe: register /
 * unregister helpers, the trace_event hooking up the stage-3 output
 * routine, an init function that registers the event type and preds,
 * and the ftrace_event_call instance placed in section
 * "_ftrace_events" for boot-time discovery.
 * Note the local "struct ftrace_event_call *call": since "call" is
 * also a macro parameter, the variable is actually named after the
 * event when the macro expands.
 * NOTE(review): many lines (braces, "int ret/id/pc;" declarations,
 * the tstruct/assign expansions, error paths, ".name" initializer and
 * closing "};") are not visible -- the listing appears truncated.
 */
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))		\
static struct ftrace_event_call event_##call;			\
static void ftrace_raw_event_##call(proto)			\
	struct ftrace_str_offsets_##call __maybe_unused __str_offsets; \
	struct ftrace_event_call *call = &event_##call;		\
	struct ring_buffer_event *event;			\
	struct ftrace_raw_##call *entry;			\
	unsigned long irq_flags;				\
	int __str_size = 0;					\
	local_save_flags(irq_flags);				\
	pc = preempt_count();					\
	event = trace_current_buffer_lock_reserve(event_##call.id, \
				  sizeof(struct ftrace_raw_##call) + __str_size,\
	entry = ring_buffer_event_data(event);			\
	if (!filter_current_check_discard(call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
static int ftrace_raw_reg_event_##call(void)			\
	ret = register_trace_##call(ftrace_raw_event_##call);	\
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n");		\
static void ftrace_raw_unreg_event_##call(void)			\
	unregister_trace_##call(ftrace_raw_event_##call);	\
static struct trace_event ftrace_event_type_##call = {		\
	.trace = ftrace_raw_output_##call,			\
static int ftrace_raw_init_event_##call(void)			\
	id = register_ftrace_event(&ftrace_event_type_##call);	\
	event_##call.id = id;					\
	INIT_LIST_HEAD(&event_##call.fields);			\
	init_preds(&event_##call);				\
static struct ftrace_event_call __used				\
__attribute__((__aligned__(4)))					\
__attribute__((section("_ftrace_events"))) event_##call = {	\
	.system = __stringify(TRACE_SYSTEM),			\
	.event = &ftrace_event_type_##call,			\
	.raw_init = ftrace_raw_init_event_##call,		\
	.regfunc = ftrace_raw_reg_event_##call,			\
	.unregfunc = ftrace_raw_unreg_event_##call,		\
	.show_format = ftrace_format_##call,			\
	.define_fields = ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)				\
571 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/* The profile helpers are local to this header; don't leak them. */
#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT