/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed)
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);

	field->name = kstrdup(name, GFP_KERNEL);

	field->type = kstrdup(type, GFP_KERNEL);

	field->filter_type = filter_assign_type(type);
	field->offset = offset;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 is_signed_type(type));			\

int trace_define_common_fields(struct ftrace_event_call *call)
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, tgid);

EXPORT_SYMBOL_GPL(trace_define_common_fields);
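
/*
 * Illustration only: given the macro above, __common_field(int, pid)
 * expands to roughly
 *
 *	ret = trace_define_field(call, "int", "common_pid",
 *				 offsetof(typeof(ent), pid), ...,
 *				 is_signed_type(int));
 *
 * where "..." stands for the size argument, which is elided from this
 * excerpt of the macro.
 */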

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,

		tracing_stop_cmdline_record();
		call->unregfunc(call->data);

	if (!call->enabled) {
		tracing_start_cmdline_record();
		call->regfunc(call->data);

static void ftrace_clear_events(void)
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	mutex_unlock(&event_mutex);

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)

		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)

		if (sub && strcmp(sub, call->system) != 0)

		if (event && strcmp(event, call->name) != 0)

		ftrace_event_enable_disable(call, set);

	mutex_unlock(&event_mutex);

static int ftrace_set_clr_event(char *buf, int set)
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
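
	/*
	 * A few illustrative inputs (the event and subsystem names here are
	 * examples only, not a list of what exists):
	 *
	 *	"sched:sched_switch"	- one named event in one subsystem
	 *	"irq:*" or "irq:"	- every event in the irq subsystem
	 *	":sched_switch"		- any event named sched_switch
	 *	"kmem"			- a subsystem or event matching "kmem"
	 */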

	match = strsep(&buf, ":");

	if (!strlen(sub) || strcmp(sub, "*") == 0)

	if (!strlen(event) || strcmp(event, "*") == 0)

	return __ftrace_set_clr_event(match, sub, event, set);

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
	return __ftrace_set_clr_event(NULL, system, event, set);
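
/*
 * Usage sketch (illustrative only; "sched" and "sched_switch" are example
 * names, not a guarantee that such events exist):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);	enables one event
 *	trace_set_clr_event("irq", NULL, 0);			disables the whole "irq" subsystem
 */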

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	ret = tracing_update_buffers();

	ret = get_user(ch, ubuf++);

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);

	/* Only white space found? */

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	while (cnt && !isspace(ch)) {
		ret = get_user(ch, ubuf++);

	ret = ftrace_set_clr_event(buf, set);

t_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	if (list == &ftrace_events)

	call = list_entry(list, struct ftrace_event_call, list);

	/*
	 * The ftrace subsystem is for showing formats only.
	 * Its events can not be enabled or disabled via the event files.
	 */

	m->private = list->next;

static void *t_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_call *call = NULL;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);

s_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	if (list == &ftrace_events)

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {

	m->private = list->next;

static void *s_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_call *call = NULL;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);

static int t_show(struct seq_file *m, void *v)
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

static void t_stop(struct seq_file *m, void *p)
	mutex_unlock(&event_mutex);

ftrace_event_seq_open(struct inode *inode, struct file *file)
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);

event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	if (cnt >= sizeof(buf))

	if (copy_from_user(&buf, ubuf, cnt))

	ret = strict_strtoul(buf, 10, &val);

	ret = tracing_update_buffers();

	mutex_lock(&event_mutex);
	ftrace_event_enable_disable(call, val);
	mutex_unlock(&event_mutex);

system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)

		if (system && strcmp(call->system, system) != 0)

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have a
		 * mixture (the low bit marks a disabled event, the
		 * next bit an enabled one, so 3 means a mixture).
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */

	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	const char *system = filp->private_data;

	if (cnt >= sizeof(buf))

	if (copy_from_user(&buf, ubuf, cnt))

	ret = strict_strtoul(buf, 10, &val);

	ret = tracing_update_buffers();

	if (val != 0 && val != 1)

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);

extern char *__bad_type_size(void);

#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\

static int trace_write_header(struct trace_seq *s)
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
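
/*
 * Sketch of the header this writes into each event's "format" file (the
 * offset and size values below are illustrative; they depend on the
 * architecture and on struct trace_entry's layout):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 */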

event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);

		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,

	r = simple_read_from_buffer(ubuf, cnt, ppos,

event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,

event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;

	if (cnt >= PAGE_SIZE)

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);

subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;

	if (cnt >= PAGE_SIZE)

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);

show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	int (*func)(struct trace_seq *s) = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

static const struct seq_operations show_event_seq_ops = {

static const struct seq_operations show_set_event_seq_ops = {

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.release = seq_release,

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.write = ftrace_event_write,
	.release = seq_release,

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,

static struct dentry *event_trace_events_dir(void)
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	d_tracer = tracing_init_dentry();

	d_events = debugfs_create_dir("events", d_tracer);
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we have already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			return system->entry;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
		pr_warning("No memory to create event subsystem %s\n",

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
		debugfs_remove(system->entry);

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
		return system->entry;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;

event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
	struct dentry *entry;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);

	entry = trace_create_file("enable", 0644, call->dir, call,

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,

	if (call->define_fields) {
		ret = call->define_fields(call);
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
		entry = trace_create_file("filter", 0644, call->dir, call,

	/* A trace may not want to export its format */
	if (!call->show_format)

	entry = trace_create_file("format", 0444, call->dir, call,
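
/*
 * Rough sketch of the resulting debugfs layout (the path prefix is
 * illustrative, it depends on where debugfs is mounted, and some of the
 * files are only created when the event supports them):
 *
 *	<debugfs>/tracing/events/<system>/<event>/enable
 *	<debugfs>/tracing/events/<system>/<event>/id
 *	<debugfs>/tracing/events/<system>/<event>/filter
 *	<debugfs>/tracing/events/<system>/<event>/format
 */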

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;

static void remove_subsystem_dir(const char *name)
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
					kfree(filter->filter_string);
				kfree(system->name);

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

static void trace_module_add_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	d_events = event_trace_events_dir();

	for_each_event(call, start, end) {
		/* The linker may leave blanks */

		if (call->raw_init) {
			ret = call->raw_init();
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
			file_ops = trace_create_file_ops(mod);

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);

static void trace_module_remove_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			ftrace_event_enable_disable(call, 0);
			__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)

	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
	struct module *mod = data;

	mutex_lock(&event_mutex);
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
	mutex_unlock(&event_mutex);

#else /* !CONFIG_MODULES */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)

#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

__setup("trace_event=", setup_trace_event);
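
/*
 * Boot-time usage sketch (the event names below are illustrative examples
 * only):
 *
 *	trace_event=sched:sched_switch,irq
 *
 * The comma-separated tokens use the same <subsystem>:<event> syntax that
 * ftrace_set_clr_event() parses for the set_event file, as seen in
 * event_trace_init() below.
 */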

static __init int event_trace_init(void)
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	char *buf = bootup_event_buf;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */

		if (call->raw_init) {
			ret = call->raw_init();
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);

	token = strsep(&buf, ",");

	ret = ftrace_set_clr_event(token, 1);
		pr_warning("Failed to enable trace event: %s\n", token);

	ret = register_module_notifier(&trace_module_nb);
		pr_warning("Failed to register trace events module notifier\n");

fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);

	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);

	mutex_unlock(&test_mutex);

static __init int event_test_thread(void *unused)
	test_malloc = kmalloc(1234, GFP_KERNEL);
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	kthread_stop(test_thread);

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
	struct ftrace_event_call *call;
	struct event_subsystem *system;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");

		ftrace_event_enable_disable(call, 1);

		ftrace_event_enable_disable(call, 0);

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

function_test_events_call(unsigned long ip, unsigned long parent_ip)
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),

	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);

static struct ftrace_ops trace_ops __initdata =
	.func = function_test_events_call,

static __init void event_trace_self_test_with_function(void)
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);

#else

static __init void event_trace_self_test_with_function(void)

#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */