/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
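/*
 * Illustration only, not part of this file: a sketch of how an event's
 * define_fields() callback might use trace_define_field().  The event,
 * the entry struct and the field names below are hypothetical.
 *
 *	static int my_event_define_fields(void)
 *	{
 *		struct ftrace_event_call *call = &event_my_event;
 *		int ret;
 *
 *		ret = trace_define_field(call, "pid_t", "pid",
 *					 offsetof(struct my_entry, pid),
 *					 sizeof(pid_t), 1);
 *		if (ret)
 *			return ret;
 *
 *		return trace_define_field(call, "char[16]", "comm",
 *					  offsetof(struct my_entry, comm),
 *					  16, 0);
 *	}
 */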
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>.
	 */
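	/*
	 * For example, from user space (a sketch; debugfs is assumed to
	 * be mounted at /sys/kernel/debug):
	 *
	 *	echo 'sched:sched_switch' > set_event	# one event
	 *	echo 'sched:' > set_event		# a whole subsystem
	 *	echo 'sched_switch' > set_event		# match by bare name
	 *
	 * A leading '!' (consumed in ftrace_event_write() below) turns
	 * the write into a clear operation, i.e. set == 0 here.
	 */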
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
/* 127 characters plus the terminating NUL (128 bytes) should be much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		return read;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
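/*
 * For example, from user space (sketch; assumes debugfs mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *
 * Values other than '0' and '1' are rejected with -EINVAL above.
 */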
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int all = 0;
	int set = 0;
	int ret;

	if (system[0] == '*')
		all = 1;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (!all && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have a
		 * mixture: bit 0 of "set" records a disabled event,
		 * bit 1 an enabled one.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char *command;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	/* ftrace_set_clr_event() modifies the string it is given */
	command = kstrdup(system, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	ret = ftrace_set_clr_event(command, val);
	if (ret)
		goto out_free;

	ret = cnt;

 out_free:
	kfree(command);

	*ppos += cnt;

	return ret;
}
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)
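/*
 * Illustration only: FIELD(unsigned short, type) expands (roughly) to
 *
 *	sizeof(unsigned short) != sizeof(field.type) ? __bad_type_size() :
 *	"unsigned short", "common_type", offsetof(typeof(field), type),
 *	sizeof(field.type)
 *
 * i.e. three printf arguments plus the size.  __bad_type_size() has no
 * definition anywhere, so if the declared type does not match the size
 * of the struct member, the build fails at link time.
 */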
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
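/*
 * For illustration, reading a "format" file produces output shaped like
 * the following; the event name, ID value and field layout are examples
 * only, but the common_* fields match trace_write_header() above:
 *
 *	name: sched_switch
 *	ID: 29
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;
 *		field:unsigned char common_flags;	offset:2;	size:1;
 *		field:unsigned char common_preempt_count;	offset:3;	size:1;
 *		field:int common_pid;	offset:4;	size:4;
 *		field:int common_tgid;	offset:8;	size:4;
 *		...
 */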
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
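/*
 * Example filter writes from user space (sketch); the predicate parsing
 * itself lives in trace_events_filter.c:
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 */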
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
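/*
 * The file_operations above map onto the debugfs tree created further
 * below, roughly as follows (sketch; <system> and <event> stand in for
 * the real directory names):
 *
 *	tracing/available_events		ftrace_avail_fops
 *	tracing/set_event			ftrace_set_event_fops
 *	tracing/events/enable			ftrace_system_enable_fops
 *	tracing/events/header_page		ftrace_show_header_fops
 *	tracing/events/header_event		ftrace_show_header_fops
 *	tracing/events/<system>/enable		ftrace_system_enable_fops
 *	tracing/events/<system>/filter		ftrace_subsystem_filter_fops
 *	tracing/events/<system>/<event>/enable	ftrace_enable_fops
 *	tracing/events/<system>/<event>/id	ftrace_event_id_fops
 *	tracing/events/<system>/<event>/filter	ftrace_event_filter_fops
 *	tracing/events/<system>/<event>/format	ftrace_event_format_fops
 */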
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  "*:*", &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
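/*
 * Putting it together, a typical user-space session against the files
 * created above might look like this (sketch; assumes debugfs mounted
 * at /sys/kernel/debug):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# cat available_events			# lists <system>:<event> pairs
 *	# echo 'irq:irq_handler_entry' > set_event
 *	# cat set_event				# shows what is enabled
 *	# echo > set_event			# clears everything, via the
 *						# O_TRUNC path in
 *						# ftrace_event_seq_open()
 */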
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event() can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */