/*
 * 2969f65d8002fc8ffd42f3518dfbf3dfe4aaccd0
 * [safe/jmp/linux-2.6] / include / trace / ftrace.h
 */
1 /*
2  * Stage 1 of the trace events.
3  *
4  * Override the macros in <trace/trace_events.h> to include the following:
5  *
6  * struct ftrace_raw_<call> {
7  *      struct trace_entry              ent;
8  *      <type>                          <item>;
9  *      <type2>                         <item2>[<len>];
10  *      [...]
11  * };
12  *
13  * The <type> <item> is created by the __field(type, item) macro or
14  * the __array(type2, item2, len) macro.
15  * We simply do "type item;", and that will create the fields
16  * in the structure.
17  */
18
19 #include <linux/ftrace_event.h>
20
21 /*
22  * TRACE_EVENT_TEMPLATE can be used to add a generic function
23  * handlers for events. That is, if all events have the same
24  * parameters and just have distinct trace points.
25  * Each tracepoint can be defined with DEFINE_EVENT and that
26  * will map the TRACE_EVENT_TEMPLATE to the tracepoint.
27  *
28  * TRACE_EVENT is a one to one mapping between tracepoint and template.
29  */
30 #undef TRACE_EVENT
31 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
32         TRACE_EVENT_TEMPLATE(name,                             \
33                              PARAMS(proto),                    \
34                              PARAMS(args),                     \
35                              PARAMS(tstruct),                  \
36                              PARAMS(assign),                   \
37                              PARAMS(print));                   \
38         DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
39
40
41 #undef __field
42 #define __field(type, item)             type    item;
43
44 #undef __field_ext
45 #define __field_ext(type, item, filter_type)    type    item;
46
47 #undef __array
48 #define __array(type, item, len)        type    item[len];
49
50 #undef __dynamic_array
51 #define __dynamic_array(type, item, len) u32 __data_loc_##item;
52
53 #undef __string
54 #define __string(item, src) __dynamic_array(char, item, -1)
55
56 #undef TP_STRUCT__entry
57 #define TP_STRUCT__entry(args...) args
58
59 #undef TRACE_EVENT_TEMPLATE
60 #define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) \
61         struct ftrace_raw_##name {                                      \
62                 struct trace_entry      ent;                            \
63                 tstruct                                                 \
64                 char                    __data[0];                      \
65         };
66 #undef DEFINE_EVENT
67 #define DEFINE_EVENT(template, name, proto, args)       \
68         static struct ftrace_event_call event_##name
69
70 #undef __cpparg
71 #define __cpparg(arg...) arg
72
73 /* Callbacks are meaningless to ftrace. */
74 #undef TRACE_EVENT_FN
75 #define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
76                 assign, print, reg, unreg)                              \
77         TRACE_EVENT(name, __cpparg(proto), __cpparg(args),              \
78                 __cpparg(tstruct), __cpparg(assign), __cpparg(print))   \
79
80 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
81
82
83 /*
84  * Stage 2 of the trace events.
85  *
86  * Include the following:
87  *
88  * struct ftrace_data_offsets_<call> {
89  *      u32                             <item1>;
90  *      u32                             <item2>;
91  *      [...]
92  * };
93  *
94  * The __dynamic_array() macro will create each u32 <item>, this is
95  * to keep the offset of each array from the beginning of the event.
96  * The size of an array is also encoded, in the higher 16 bits of <item>.
97  */
98
99 #undef __field
100 #define __field(type, item)
101
102 #undef __field_ext
103 #define __field_ext(type, item, filter_type)
104
105 #undef __array
106 #define __array(type, item, len)
107
108 #undef __dynamic_array
109 #define __dynamic_array(type, item, len)        u32 item;
110
111 #undef __string
112 #define __string(item, src) __dynamic_array(char, item, -1)
113
114 #undef TRACE_EVENT_TEMPLATE
115 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
116         struct ftrace_data_offsets_##call {                             \
117                 tstruct;                                                \
118         };
119
120 #undef DEFINE_EVENT
121 #define DEFINE_EVENT(template, name, proto, args)
122
123 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
124
125 /*
126  * Setup the showing format of trace point.
127  *
128  * int
129  * ftrace_format_##call(struct trace_seq *s)
130  * {
131  *      struct ftrace_raw_##call field;
132  *      int ret;
133  *
134  *      ret = trace_seq_printf(s, #type " " #item ";"
135  *                             " offset:%u; size:%u;\n",
136  *                             offsetof(struct ftrace_raw_##call, item),
137  *                             sizeof(field.type));
138  *
139  * }
140  */
141
142 #undef TP_STRUCT__entry
143 #define TP_STRUCT__entry(args...) args
144
145 #undef __field
146 #define __field(type, item)                                     \
147         ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"      \
148                                "offset:%u;\tsize:%u;\tsigned:%u;\n",    \
149                                (unsigned int)offsetof(typeof(field), item), \
150                                (unsigned int)sizeof(field.item),        \
151                                (unsigned int)is_signed_type(type));     \
152         if (!ret)                                                       \
153                 return 0;
154
155 #undef __field_ext
156 #define __field_ext(type, item, filter_type)    __field(type, item)
157
158 #undef __array
159 #define __array(type, item, len)                                                \
160         ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"    \
161                                "offset:%u;\tsize:%u;\tsigned:%u;\n",    \
162                                (unsigned int)offsetof(typeof(field), item), \
163                                (unsigned int)sizeof(field.item),        \
164                                (unsigned int)is_signed_type(type));     \
165         if (!ret)                                                       \
166                 return 0;
167
168 #undef __dynamic_array
169 #define __dynamic_array(type, item, len)                                       \
170         ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
171                                "offset:%u;\tsize:%u;\tsigned:%u;\n",           \
172                                (unsigned int)offsetof(typeof(field),           \
173                                         __data_loc_##item),                    \
174                                (unsigned int)sizeof(field.__data_loc_##item), \
175                                (unsigned int)is_signed_type(type));     \
176         if (!ret)                                                              \
177                 return 0;
178
179 #undef __string
180 #define __string(item, src) __dynamic_array(char, item, -1)
181
182 #undef __entry
183 #define __entry REC
184
185 #undef __print_symbolic
186 #undef __get_dynamic_array
187 #undef __get_str
188
189 #undef TP_printk
190 #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
191
192 #undef TP_fast_assign
193 #define TP_fast_assign(args...) args
194
195 #undef TP_perf_assign
196 #define TP_perf_assign(args...)
197
198 #undef TRACE_EVENT_TEMPLATE
199 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)   \
200 static int                                                              \
201 ftrace_format_##call(struct ftrace_event_call *unused,                  \
202                       struct trace_seq *s)                              \
203 {                                                                       \
204         struct ftrace_raw_##call field __attribute__((unused));         \
205         int ret = 0;                                                    \
206                                                                         \
207         tstruct;                                                        \
208                                                                         \
209         trace_seq_printf(s, "\nprint fmt: " print);                     \
210                                                                         \
211         return ret;                                                     \
212 }
213
214 #undef DEFINE_EVENT
215 #define DEFINE_EVENT(template, name, proto, args)
216
217 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
218
219 /*
220  * Stage 3 of the trace events.
221  *
222  * Override the macros in <trace/trace_events.h> to include the following:
223  *
224  * enum print_line_t
225  * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
226  * {
227  *      struct trace_seq *s = &iter->seq;
228  *      struct ftrace_raw_<call> *field; <-- defined in stage 1
229  *      struct trace_entry *entry;
230  *      struct trace_seq *p;
231  *      int ret;
232  *
233  *      entry = iter->ent;
234  *
235  *      if (entry->type != event_<call>.id) {
236  *              WARN_ON_ONCE(1);
237  *              return TRACE_TYPE_UNHANDLED;
238  *      }
239  *
240  *      field = (typeof(field))entry;
241  *
242  *      p = get_cpu_var(ftrace_event_seq);
243  *      trace_seq_init(p);
244  *      ret = trace_seq_printf(s, <TP_printk> "\n");
245  *      put_cpu();
246  *      if (!ret)
247  *              return TRACE_TYPE_PARTIAL_LINE;
248  *
249  *      return TRACE_TYPE_HANDLED;
250  * }
251  *
252  * This is the method used to print the raw event to the trace
253  * output format. Note, this is not needed if the data is read
254  * in binary.
255  */
256
257 #undef __entry
258 #define __entry field
259
260 #undef TP_printk
261 #define TP_printk(fmt, args...) fmt "\n", args
262
263 #undef __get_dynamic_array
264 #define __get_dynamic_array(field)      \
265                 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
266
267 #undef __get_str
268 #define __get_str(field) (char *)__get_dynamic_array(field)
269
270 #undef __print_flags
271 #define __print_flags(flag, delim, flag_array...)                       \
272         ({                                                              \
273                 static const struct trace_print_flags __flags[] =       \
274                         { flag_array, { -1, NULL }};                    \
275                 ftrace_print_flags_seq(p, delim, flag, __flags);        \
276         })
277
278 #undef __print_symbolic
279 #define __print_symbolic(value, symbol_array...)                        \
280         ({                                                              \
281                 static const struct trace_print_flags symbols[] =       \
282                         { symbol_array, { -1, NULL }};                  \
283                 ftrace_print_symbols_seq(p, value, symbols);            \
284         })
285
286 #undef TRACE_EVENT_TEMPLATE
287 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
288 static enum print_line_t                                                \
289 ftrace_raw_output_id_##call(int event_id, const char *name,             \
290                             struct trace_iterator *iter, int flags)     \
291 {                                                                       \
292         struct trace_seq *s = &iter->seq;                               \
293         struct ftrace_raw_##call *field;                                \
294         struct trace_entry *entry;                                      \
295         struct trace_seq *p;                                            \
296         int ret;                                                        \
297                                                                         \
298         entry = iter->ent;                                              \
299                                                                         \
300         if (entry->type != event_id) {                                  \
301                 WARN_ON_ONCE(1);                                        \
302                 return TRACE_TYPE_UNHANDLED;                            \
303         }                                                               \
304                                                                         \
305         field = (typeof(field))entry;                                   \
306                                                                         \
307         p = &get_cpu_var(ftrace_event_seq);                             \
308         trace_seq_init(p);                                              \
309         ret = trace_seq_printf(s, "%s: ", name);                        \
310         if (ret)                                                        \
311                 ret = trace_seq_printf(s, print);                       \
312         put_cpu();                                                      \
313         if (!ret)                                                       \
314                 return TRACE_TYPE_PARTIAL_LINE;                         \
315                                                                         \
316         return TRACE_TYPE_HANDLED;                                      \
317 }
318
319 #undef DEFINE_EVENT
320 #define DEFINE_EVENT(template, name, proto, args)                       \
321 static enum print_line_t                                                \
322 ftrace_raw_output_##name(struct trace_iterator *iter, int flags)        \
323 {                                                                       \
324         return ftrace_raw_output_id_##template(event_##name.id,         \
325                                                #name, iter, flags);     \
326 }
327
328 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
329
/*
 * Define-fields stage: register each field with the filtering engine.
 * Every trace_define_field() result is checked so a registration failure
 * propagates out of ftrace_define_fields_<call>().
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

/*
 * Fix: the original dropped trace_define_field()'s return value here,
 * unlike __field_ext/__array above; propagate failures the same way.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);				       \
	if (ret)							       \
		return ret;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
360
361 #undef TRACE_EVENT_TEMPLATE
362 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)   \
363 static int                                                              \
364 ftrace_define_fields_##call(struct ftrace_event_call *event_call)       \
365 {                                                                       \
366         struct ftrace_raw_##call field;                                 \
367         int ret;                                                        \
368                                                                         \
369         ret = trace_define_common_fields(event_call);                   \
370         if (ret)                                                        \
371                 return ret;                                             \
372                                                                         \
373         tstruct;                                                        \
374                                                                         \
375         return ret;                                                     \
376 }
377
378 #undef DEFINE_EVENT
379 #define DEFINE_EVENT(template, name, proto, args)
380
381 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
382
383 /*
384  * remember the offset of each array from the beginning of the event.
385  */
386
387 #undef __entry
388 #define __entry entry
389
390 #undef __field
391 #define __field(type, item)
392
393 #undef __field_ext
394 #define __field_ext(type, item, filter_type)
395
396 #undef __array
397 #define __array(type, item, len)
398
399 #undef __dynamic_array
400 #define __dynamic_array(type, item, len)                                \
401         __data_offsets->item = __data_size +                            \
402                                offsetof(typeof(*entry), __data);        \
403         __data_offsets->item |= (len * sizeof(type)) << 16;             \
404         __data_size += (len) * sizeof(type);
405
406 #undef __string
407 #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
408
409 #undef TRACE_EVENT_TEMPLATE
410 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
411 static inline int ftrace_get_offsets_##call(                            \
412         struct ftrace_data_offsets_##call *__data_offsets, proto)       \
413 {                                                                       \
414         int __data_size = 0;                                            \
415         struct ftrace_raw_##call __maybe_unused *entry;                 \
416                                                                         \
417         tstruct;                                                        \
418                                                                         \
419         return __data_size;                                             \
420 }
421
422 #undef DEFINE_EVENT
423 #define DEFINE_EVENT(template, name, proto, args)
424
425 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
426
427 #ifdef CONFIG_EVENT_PROFILE
428
429 /*
430  * Generate the functions needed for tracepoint perf_event support.
431  *
432  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
433  *
434  * static int ftrace_profile_enable_<call>(void)
435  * {
436  *      return register_trace_<call>(ftrace_profile_<call>);
437  * }
438  *
439  * static void ftrace_profile_disable_<call>(void)
440  * {
441  *      unregister_trace_<call>(ftrace_profile_<call>);
442  * }
443  *
444  */
445
446 #undef TRACE_EVENT_TEMPLATE
447 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)
448
449 #undef DEFINE_EVENT
450 #define DEFINE_EVENT(template, name, proto, args)                       \
451                                                                         \
452 static void ftrace_profile_##name(proto);                               \
453                                                                         \
454 static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
455 {                                                                       \
456         return register_trace_##name(ftrace_profile_##name);            \
457 }                                                                       \
458                                                                         \
459 static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
460 {                                                                       \
461         unregister_trace_##name(ftrace_profile_##name);                 \
462 }
463
464 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
465
466 #endif
467
468 /*
469  * Stage 4 of the trace events.
470  *
471  * Override the macros in <trace/trace_events.h> to include the following:
472  *
473  * static void ftrace_event_<call>(proto)
474  * {
475  *      event_trace_printk(_RET_IP_, "<call>: " <fmt>);
476  * }
477  *
478  * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
479  * {
480  *      int ret;
481  *
482  *      ret = register_trace_<call>(ftrace_event_<call>);
483  *      if (ret)
484  *              pr_info("event trace: Could not activate trace point "
485  *                      "probe to  <call>");
486  *      return ret;
487  * }
488  *
489  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
490  * {
491  *      unregister_trace_<call>(ftrace_event_<call>);
492  * }
493  *
494  *
495  * For those macros defined with TRACE_EVENT:
496  *
497  * static struct ftrace_event_call event_<call>;
498  *
499  * static void ftrace_raw_event_<call>(proto)
500  * {
501  *      struct ring_buffer_event *event;
502  *      struct ftrace_raw_<call> *entry; <-- defined in stage 1
503  *      struct ring_buffer *buffer;
504  *      unsigned long irq_flags;
505  *      int pc;
506  *
507  *      local_save_flags(irq_flags);
508  *      pc = preempt_count();
509  *
510  *      event = trace_current_buffer_lock_reserve(&buffer,
511  *                                event_<call>.id,
512  *                                sizeof(struct ftrace_raw_<call>),
513  *                                irq_flags, pc);
514  *      if (!event)
515  *              return;
516  *      entry   = ring_buffer_event_data(event);
517  *
518  *      <assign>;  <-- Here we assign the entries by the __field and
519  *                      __array macros.
520  *
521  *      trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
522  * }
523  *
524  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
525  * {
526  *      int ret;
527  *
528  *      ret = register_trace_<call>(ftrace_raw_event_<call>);
529  *      if (ret)
530  *              pr_info("event trace: Could not activate trace point "
531  *                      "probe to <call>");
532  *      return ret;
533  * }
534  *
535  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
536  * {
537  *      unregister_trace_<call>(ftrace_raw_event_<call>);
538  * }
539  *
540  * static struct trace_event ftrace_event_type_<call> = {
541  *      .trace                  = ftrace_raw_output_<call>, <-- stage 2
542  * };
543  *
544  * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
545  * {
546  *      int id;
547  *
548  *      id = register_ftrace_event(&ftrace_event_type_<call>);
549  *      if (!id)
550  *              return -ENODEV;
551  *      event_<call>.id = id;
552  *      return 0;
553  * }
554  *
555  * static struct ftrace_event_call __used
556  * __attribute__((__aligned__(4)))
557  * __attribute__((section("_ftrace_events"))) event_<call> = {
558  *      .name                   = "<call>",
559  *      .system                 = "<system>",
560  *      .raw_init               = ftrace_raw_init_event_<call>,
561  *      .regfunc                = ftrace_reg_event_<call>,
562  *      .unregfunc              = ftrace_unreg_event_<call>,
563  *      .show_format            = ftrace_format_<call>,
564  * }
565  *
566  */
567
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

/* Initializers spliced into the event_call definition when profiling is on. */
#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

/* Stage 4: fixed fields are filled by TP_fast_assign, not by tstruct. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the precomputed offset/size word into the record being written. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

/*
 * Fix: dropped the stray trailing line-continuation backslash that
 * spliced the following blank line into this macro's expansion.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
601
/*
 * Stage 4 template: the probe body that writes one event into the ring
 * buffer.  It sizes the dynamic payload, reserves buffer space for the
 * fixed record plus that payload, fills in the dynamic-array location
 * words (tstruct) and the field values (assign), then commits unless
 * the filter discards the event.
 */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
638
639 #undef DEFINE_EVENT
640 #define DEFINE_EVENT(template, call, proto, args)                       \
641                                                                         \
642 static void ftrace_raw_event_##call(proto)                              \
643 {                                                                       \
644         ftrace_raw_event_id_##template(&event_##call, args);            \
645 }                                                                       \
646                                                                         \
647 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
648 {                                                                       \
649         int ret;                                                        \
650                                                                         \
651         ret = register_trace_##call(ftrace_raw_event_##call);           \
652         if (ret)                                                        \
653                 pr_info("event trace: Could not activate trace point "  \
654                         "probe to " #call "\n");                        \
655         return ret;                                                     \
656 }                                                                       \
657                                                                         \
658 static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
659 {                                                                       \
660         unregister_trace_##call(ftrace_raw_event_##call);               \
661 }                                                                       \
662                                                                         \
663 static struct trace_event ftrace_event_type_##call = {                  \
664         .trace                  = ftrace_raw_output_##call,             \
665 };                                                                      \
666                                                                         \
667 static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
668 {                                                                       \
669         int id;                                                         \
670                                                                         \
671         id = register_ftrace_event(&ftrace_event_type_##call);          \
672         if (!id)                                                        \
673                 return -ENODEV;                                         \
674         event_##call.id = id;                                           \
675         INIT_LIST_HEAD(&event_##call.fields);                           \
676         return 0;                                                       \
677 }                                                                       \
678                                                                         \
679 static struct ftrace_event_call __used                                  \
680 __attribute__((__aligned__(4)))                                         \
681 __attribute__((section("_ftrace_events"))) event_##call = {             \
682         .name                   = #call,                                \
683         .system                 = __stringify(TRACE_SYSTEM),            \
684         .event                  = &ftrace_event_type_##call,            \
685         .raw_init               = ftrace_raw_init_event_##call,         \
686         .regfunc                = ftrace_raw_reg_event_##call,          \
687         .unregfunc              = ftrace_raw_unreg_event_##call,        \
688         .show_format            = ftrace_format_##template,             \
689         .define_fields          = ftrace_define_fields_##template,      \
690         _TRACE_PROFILE_INIT(call)                                       \
691 }
692
693 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
694
695 /*
696  * Define the insertion callback to profile events
697  *
698  * The job is very similar to ftrace_raw_event_<call> except that we don't
699  * insert in the ring buffer but in a perf counter.
700  *
701  * static void ftrace_profile_<call>(proto)
702  * {
703  *      struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
704  *      struct ftrace_event_call *event_call = &event_<call>;
705  *      extern void perf_tp_event(int, u64, u64, void *, int);
706  *      struct ftrace_raw_<call> *entry;
707  *      struct perf_trace_buf *trace_buf;
708  *      u64 __addr = 0, __count = 1;
709  *      unsigned long irq_flags;
710  *      struct trace_entry *ent;
711  *      int __entry_size;
712  *      int __data_size;
713  *      int __cpu;
714  *      int pc;
715  *
716  *      pc = preempt_count();
717  *
718  *      __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
719  *
720  *      // Below we want to get the aligned size by taking into account
721  *      // the u32 field that will later store the buffer size
722  *      __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
723  *                           sizeof(u64));
724  *      __entry_size -= sizeof(u32);
725  *
726  *      // Protect the non nmi buffer
727  *      // This also protects the rcu read side
728  *      local_irq_save(irq_flags);
729  *      __cpu = smp_processor_id();
730  *
731  *      if (in_nmi())
732  *              trace_buf = rcu_dereference(perf_trace_buf_nmi);
733  *      else
734  *              trace_buf = rcu_dereference(perf_trace_buf);
735  *
736  *      if (!trace_buf)
737  *              goto end;
738  *
739  *      trace_buf = per_cpu_ptr(trace_buf, __cpu);
740  *
741  *      // Avoid recursion from perf that could mess up the buffer
742  *      if (trace_buf->recursion++)
743  *              goto end_recursion;
744  *
745  *      raw_data = trace_buf->buf;
746  *
747  *      // Make recursion update visible before entering perf_tp_event
748  *      // so that we protect from perf recursions.
749  *
750  *      barrier();
751  *
752  *      //zero dead bytes from alignment to avoid stack leak to userspace:
753  *      *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
754  *      entry = (struct ftrace_raw_<call> *)raw_data;
755  *      ent = &entry->ent;
756  *      tracing_generic_entry_update(ent, irq_flags, pc);
757  *      ent->type = event_call->id;
758  *
759  *      <tstruct> <- do some jobs with dynamic arrays
760  *
761  *      <assign>  <- affect our values
762  *
763  *      perf_tp_event(event_call->id, __addr, __count, entry,
764  *                   __entry_size);  <- submit them to perf counter
765  *
766  * }
767  */
768
769 #ifdef CONFIG_EVENT_PROFILE

770
/*
 * __perf_addr() and __perf_count() are expanded inside TP_perf_assign()
 * in an event's definition.  They override the defaults set up by the
 * profile callback (__addr = 0, __count = 1) with event-supplied values
 * before the record is handed to perf_tp_event().
 */
771 #undef __perf_addr
772 #define __perf_addr(a) __addr = (a)
773
774 #undef __perf_count
775 #define __perf_count(c) __count = (c)
776
/*
 * Generate ftrace_profile_templ_<call>(): the shared profile (perf)
 * callback for all events built from one template.  It sizes the record
 * (payload + header, u64-aligned, minus the u32 size slot), bails out
 * with a one-shot warning if that exceeds FTRACE_MAX_PROFILE_SIZE, then
 * with irqs off takes a perf recursion slot, picks the nmi or non-nmi
 * per-cpu buffer, zeroes the trailing alignment padding so no stack
 * bytes leak to userspace, fills in the generic trace_entry plus the
 * event fields (tstruct/assign), and submits via perf_tp_event().
 * The helpers are declared extern here so this header need not pull in
 * the perf headers.  See the block comment above for a walk-through.
 */
777 #undef TRACE_EVENT_TEMPLATE
778 #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
779 static void                                                             \
780 ftrace_profile_templ_##call(struct ftrace_event_call *event_call,       \
781                             proto)                                      \
782 {                                                                       \
783         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
784         extern int perf_swevent_get_recursion_context(void);            \
785         extern void perf_swevent_put_recursion_context(int rctx);       \
786         extern void perf_tp_event(int, u64, u64, void *, int);          \
787         struct ftrace_raw_##call *entry;                                \
788         u64 __addr = 0, __count = 1;                                    \
789         unsigned long irq_flags;                                        \
790         struct trace_entry *ent;                                        \
791         int __entry_size;                                               \
792         int __data_size;                                                \
793         char *trace_buf;                                                \
794         char *raw_data;                                                 \
795         int __cpu;                                                      \
796         int rctx;                                                       \
797         int pc;                                                         \
798                                                                         \
799         pc = preempt_count();                                           \
800                                                                         \
801         __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
802         __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
803                              sizeof(u64));                              \
804         __entry_size -= sizeof(u32);                                    \
805                                                                         \
806         if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,           \
807                       "profile buffer not large enough"))               \
808                 return;                                                 \
809                                                                         \
810         local_irq_save(irq_flags);                                      \
811                                                                         \
812         rctx = perf_swevent_get_recursion_context();                    \
813         if (rctx < 0)                                                   \
814                 goto end_recursion;                                     \
815                                                                         \
816         __cpu = smp_processor_id();                                     \
817                                                                         \
818         if (in_nmi())                                                   \
819                 trace_buf = rcu_dereference(perf_trace_buf_nmi);        \
820         else                                                            \
821                 trace_buf = rcu_dereference(perf_trace_buf);            \
822                                                                         \
823         if (!trace_buf)                                                 \
824                 goto end;                                               \
825                                                                         \
826         raw_data = per_cpu_ptr(trace_buf, __cpu);                       \
827                                                                         \
828         *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;         \
829         entry = (struct ftrace_raw_##call *)raw_data;                   \
830         ent = &entry->ent;                                              \
831         tracing_generic_entry_update(ent, irq_flags, pc);               \
832         ent->type = event_call->id;                                     \
833                                                                         \
834         tstruct                                                         \
835                                                                         \
836         { assign; }                                                     \
837                                                                         \
838         perf_tp_event(event_call->id, __addr, __count, entry,           \
839                              __entry_size);                             \
840                                                                         \
841 end:                                                                    \
842         perf_swevent_put_recursion_context(rctx);                       \
843 end_recursion:                                                          \
844         local_irq_restore(irq_flags);                                   \
845                                                                         \
846 }
847
/*
 * Generate the per-tracepoint profile stub ftrace_profile_<call>():
 * it simply forwards its tracepoint arguments, together with the
 * event's ftrace_event_call descriptor, to the shared template
 * handler ftrace_profile_templ_<template>() defined above.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)               \
static void ftrace_profile_##call(proto)                        \
{                                                               \
        ftrace_profile_templ_##template(&event_##call, args);   \
}
856
857 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
858 #endif /* CONFIG_EVENT_PROFILE */
859
860 #undef _TRACE_PROFILE_INIT
861