/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

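/* Serializes event enable/disable and walks of the ftrace_events list. */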
DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

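/*
 * Disable every enabled event.  Called when the set_event file is opened
 * for writing without O_APPEND, i.e. the user is replacing the whole set.
 */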
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}

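/* Enable or disable a single event; callers hold event_mutex. */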
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
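	/*
	 * For example, "sched:sched_switch" names a single event, while
	 * "sched:" (or just "sched") covers every event in the sched
	 * subsystem.
	 */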

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

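/*
 * Parse a whitespace-terminated token from user space and hand it to
 * ftrace_set_clr_event().  A leading '!' clears the matching events
 * instead of setting them.
 */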
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

extern char *__bad_type_size(void);

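/*
 * Emit the type, name, offset and size of one common field of struct
 * trace_entry.  The sizeof() comparison forces a link error through
 * __bad_type_size() if the type named here ever disagrees with the
 * size of the real field.
 */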
#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug!  The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					      buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

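/* Create (once) and return the top-level debugfs "events" directory. */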
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

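/*
 * Return the debugfs directory for a subsystem, creating it and its
 * "filter" file on first use.  On failure the top-level events directory
 * is returned so the event itself can still be created there.
 */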
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	return system->entry;
}

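/*
 * Create the per-event debugfs directory along with whichever of the
 * enable, id, filter and format files the event definition supports.
 */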
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

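/* Walk the event calls laid out contiguously between start and end. */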
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

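/*
 * Undo everything a module's events set up: disable them, unregister
 * their output handlers, remove their debugfs directories, unlink them
 * from the event list and free their fields and filter predicates, then
 * release the module's private file_operations copies.
 */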
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

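/*
 * Per-cpu recursion guard: the function-trace probe below can itself hit
 * traced functions, so only the outermost invocation on each CPU records
 * an entry.
 */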
static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{

	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif