tracing/events: simplify system_enable_read()
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

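/*
 * trace_define_field - record one field of an event's entry layout
 *
 * Allocates an ftrace_event_field describing @name/@type and links it
 * on @call->fields so the event filter code can look it up by name.
 */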
int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size, int is_signed)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;

        list_for_each_entry_safe(field, next, &call->fields, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}

#endif /* CONFIG_MODULES */

static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
        }
        mutex_unlock(&event_mutex);
}

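/*
 * Register or unregister a single event's tracepoint probe.
 * Caller must hold event_mutex.
 */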
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        call->regfunc();
                }
                break;
        }
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */
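        /*
         * e.g. "sched:sched_switch" enables just that event, while
         * "sched:" enables the whole sched subsystem.  (A leading '!'
         * to disable is stripped by ftrace_event_write() before the
         * string reaches us; it arrives here as set == 0.)
         */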

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        size_t read = 0;
        int i, set = 1;
        ssize_t ret;
        char *buf;
        char ch;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        return ret;
                read++;
                cnt--;
        }

        /* Only white space found? */
        if (isspace(ch)) {
                file->f_pos += read;
                ret = read;
                return ret;
        }

        buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (cnt > EVENT_BUF_SIZE)
                cnt = EVENT_BUF_SIZE;

        i = 0;
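        /*
         * Copy the event name in; a leading '!' is not part of the
         * name, it flips us into "disable" mode instead.
         */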
        while (cnt && !isspace(ch)) {
                if (!i && ch == '!')
                        set = 0;
                else
                        buf[i++] = ch;

                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out_free;
                read++;
                cnt--;
        }
        buf[i] = 0;

        file->f_pos += read;

        ret = ftrace_set_clr_event(buf, set);
        if (ret)
                goto out_free;

        ret = read;

 out_free:
        kfree(buf);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

        for (;;) {
                if (list == &ftrace_events)
                        return NULL;

                call = list_entry(list, struct ftrace_event_call, list);

                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->regfunc)
                        break;

                list = list->next;
        }

        m->private = list->next;

        return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        if (*pos == 0)
                m->private = ftrace_events.next;
        return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

 retry:
        if (list == &ftrace_events)
                return NULL;

        call = list_entry(list, struct ftrace_event_call, list);

        if (!call->enabled) {
                list = list->next;
                goto retry;
        }

        m->private = list->next;

        return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        if (*pos == 0)
                m->private = ftrace_events.next;
        return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}

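/*
 * Per-event "enable" file: reads back "0\n" or "1\n"; writing 0 or 1
 * unregisters or registers the event's tracepoint probe.
 */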
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
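        /*
         * Index into set_to_char with the "set" bitmask built below:
         * 0 = no events matched ('?'), 1 = all disabled ('0'),
         * 2 = all enabled ('1'), 3 = a mix of both ('X').
         */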
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        const char *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->regfunc)
                        continue;

                if (system && strcmp(call->system, system) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!call->enabled);

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        const char *system = filp->private_data;
        unsigned long val;
        char buf[64];
        ssize_t ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        ret = __ftrace_set_clr_event(NULL, system, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

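/*
 * __bad_type_size() is never defined anywhere, so if FIELD() below is
 * used with a type whose size does not match the struct member, the
 * reference survives to link time and the build fails.
 */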
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)                                               \
        sizeof(type) != sizeof(field.name) ? __bad_type_size() :        \
        #type, "common_" #name, offsetof(typeof(field), name),          \
                sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\n",
                                FIELD(unsigned short, type),
                                FIELD(unsigned char, flags),
                                FIELD(unsigned char, preempt_count),
                                FIELD(int, pid),
                                FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */

        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(s);
        if (!r) {
                /*
                 * ug!  The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                              buf, strlen(buf));
                goto out;
        }

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
 out:
        kfree(s);
        return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = tracing_open_generic,
        .read = system_enable_read,
        .write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);

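/*
 * Return the directory for subsystem @name under events/, creating it
 * (along with the subsystem's "filter" and "enable" control files) on
 * first use.  On failure we fall back to @d_events so the event
 * directory is still created, just at the top level.
 */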
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        return system->entry;
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        entry = trace_create_file("enable", 0644, system->entry,
                                  (void *)system->name,
                                  &ftrace_system_enable_fops);

        return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct dentry *entry;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        if (call->raw_init) {
                ret = call->raw_init();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc)
                entry = trace_create_file("enable", 0644, call->dir, call,
                                          enable);

        if (call->id)
                entry = trace_create_file("id", 0444, call->dir, call,
                                          id);

        if (call->define_fields) {
                ret = call->define_fields();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                entry = trace_create_file("filter", 0644, call->dir, call,
                                          filter);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        entry = trace_create_file("format", 0444, call->dir, call,
                                  format);

        return 0;
}

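/*
 * Step an ftrace_event_call pointer across the section array delimited
 * by start and end (e.g. __start_ftrace_events/__stop_ftrace_events).
 */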
#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head                list;
        struct module                   *mod;
        struct file_operations          id;
        struct file_operations          enable;
        struct file_operations          format;
        struct file_operations          filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call, *start, *end;
        struct dentry *d_events;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        d_events = event_trace_events_dir();
        if (!d_events)
                return;

        for_each_event(call, start, end) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;

                /*
                 * This module has events, create file ops for this module
                 * if not already done.
                 */
                if (!file_ops) {
                        file_ops = trace_create_file_ops(mod);
                        if (!file_ops)
                                return;
                }
                call->mod = mod;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events,
                                 &file_ops->id, &file_ops->enable,
                                 &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        if (call->enabled) {
                                call->enabled = 0;
                                call->unregfunc();
                        }
                        if (call->event)
                                unregister_ftrace_event(call->event);
                        debugfs_remove_recursive(call->dir);
                        list_del(&call->list);
                        trace_destroy_fields(call);
                        destroy_preds(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
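        /*
         * If the loop ran to completion without a match, file_ops now
         * points at the list head itself, so only free a real entry.
         */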
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events, &ftrace_event_id_fops,
                                 &ftrace_enable_fops, &ftrace_event_filter_fops,
                                 &ftrace_event_format_fops);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a regfunc */
                if (!call->regfunc)
                        continue;

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->enabled) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                call->enabled = 1;
                call->regfunc();

                event_test_stuff();

                call->unregfunc();
                call->enabled = 0;

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

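/*
 * Hooked straight into the function tracer while the self tests rerun;
 * the per-CPU counter keeps this callback from recursing into itself.
 */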
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
        atomic_dec(&per_cpu(test_event_disable, cpu));
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata = {
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        event_trace_self_tests();

        event_trace_self_test_with_function();

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif