kgdb: gdb "monitor" -> kdb passthrough
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 97470c4..c697c70 100644
  *
  */
 
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/setup.h>
 
 #include "trace_output.h"
 
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM "TRACE_SYSTEM"
 
-static DEFINE_MUTEX(event_mutex);
+DEFINE_MUTEX(event_mutex);
+
+LIST_HEAD(ftrace_events);
 
-int trace_define_field(struct ftrace_event_call *call, char *type,
-                      char *name, int offset, int size)
+int trace_define_field(struct ftrace_event_call *call, const char *type,
+                      const char *name, int offset, int size, int is_signed,
+                      int filter_type)
 {
        struct ftrace_event_field *field;
 
-       field = kmalloc(sizeof(*field), GFP_KERNEL);
+       field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;
+
        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;
+
        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;
+
+       if (filter_type == FILTER_OTHER)
+               field->filter_type = filter_assign_type(type);
+       else
+               field->filter_type = filter_type;
+
        field->offset = offset;
        field->size = size;
+       field->is_signed = is_signed;
+
        list_add(&field->link, &call->fields);
 
        return 0;
+
 err:
-       if (field) {
+       if (field)
                kfree(field->name);
-               kfree(field->type);
-       }
        kfree(field);
+
        return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(trace_define_field);
+
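
For illustration only, a minimal sketch of how a define_fields() callback might use the extended trace_define_field() signature above; struct my_entry and its "ip" field are hypothetical and not part of this patch:

        struct my_entry {
                unsigned long ip;       /* hypothetical record layout */
        };

        static int my_event_define_fields(struct ftrace_event_call *call)
        {
                struct my_entry field;

                /* type string, name, offset, size, is_signed, filter type */
                return trace_define_field(call, "unsigned long", "ip",
                                          offsetof(typeof(field), ip),
                                          sizeof(field.ip),
                                          0 /* unsigned */, FILTER_OTHER);
        }
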
+#define __common_field(type, item)                                     \
+       ret = trace_define_field(call, #type, "common_" #item,          \
+                                offsetof(typeof(ent), item),           \
+                                sizeof(ent.item),                      \
+                                is_signed_type(type), FILTER_OTHER);   \
+       if (ret)                                                        \
+               return ret;
 
-static void ftrace_clear_events(void)
+static int trace_define_common_fields(struct ftrace_event_call *call)
 {
-       struct ftrace_event_call *call = (void *)__start_ftrace_events;
+       int ret;
+       struct trace_entry ent;
+
+       __common_field(unsigned short, type);
+       __common_field(unsigned char, flags);
+       __common_field(unsigned char, preempt_count);
+       __common_field(int, pid);
+       __common_field(int, lock_depth);
 
+       return ret;
+}
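
Expanding the __common_field() macro by hand shows what each line of trace_define_common_fields() amounts to; for example, __common_field(int, pid) becomes roughly:

        ret = trace_define_field(call, "int", "common_pid",
                                 offsetof(typeof(ent), pid),
                                 sizeof(ent.pid),
                                 is_signed_type(int), FILTER_OTHER);
        if (ret)
                return ret;
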
 
-       while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
+void trace_destroy_fields(struct ftrace_event_call *call)
+{
+       struct ftrace_event_field *field, *next;
 
-               if (call->enabled) {
-                       call->enabled = 0;
-                       call->unregfunc();
-               }
-               call++;
+       list_for_each_entry_safe(field, next, &call->fields, link) {
+               list_del(&field->link);
+               kfree(field->type);
+               kfree(field->name);
+               kfree(field);
        }
 }
 
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+int trace_event_raw_init(struct ftrace_event_call *call)
+{
+       int id;
+
+       id = register_ftrace_event(call->event);
+       if (!id)
+               return -ENODEV;
+       call->id = id;
+       INIT_LIST_HEAD(&call->fields);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(trace_event_raw_init);
+
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
 {
+       int ret = 0;
 
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
-                       call->unregfunc();
+                       tracing_stop_cmdline_record();
+                       call->unregfunc(call);
                }
                break;
        case 1:
                if (!call->enabled) {
+                       tracing_start_cmdline_record();
+                       ret = call->regfunc(call);
+                       if (ret) {
+                               tracing_stop_cmdline_record();
+                               pr_info("event trace: Could not enable event "
+                                       "%s\n", call->name);
+                               break;
+                       }
                        call->enabled = 1;
-                       call->regfunc();
                }
                break;
        }
+
+       return ret;
+}
+
+static void ftrace_clear_events(void)
+{
+       struct ftrace_event_call *call;
+
+       mutex_lock(&event_mutex);
+       list_for_each_entry(call, &ftrace_events, list) {
+               ftrace_event_enable_disable(call, 0);
+       }
+       mutex_unlock(&event_mutex);
+}
+
+/*
+ * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
+ */
+static int __ftrace_set_clr_event(const char *match, const char *sub,
+                                 const char *event, int set)
+{
+       struct ftrace_event_call *call;
+       int ret = -EINVAL;
+
+       mutex_lock(&event_mutex);
+       list_for_each_entry(call, &ftrace_events, list) {
+
+               if (!call->name || !call->regfunc)
+                       continue;
+
+               if (match &&
+                   strcmp(match, call->name) != 0 &&
+                   strcmp(match, call->system) != 0)
+                       continue;
+
+               if (sub && strcmp(sub, call->system) != 0)
+                       continue;
+
+               if (event && strcmp(event, call->name) != 0)
+                       continue;
+
+               ftrace_event_enable_disable(call, set);
+
+               ret = 0;
+       }
+       mutex_unlock(&event_mutex);
+
+       return ret;
 }
 
 static int ftrace_set_clr_event(char *buf, int set)
 {
-       struct ftrace_event_call *call = __start_ftrace_events;
        char *event = NULL, *sub = NULL, *match;
-       int ret = -EINVAL;
 
        /*
         * The buf format can be <subsystem>:<event-name>
@@ -112,30 +222,24 @@ static int ftrace_set_clr_event(char *buf, int set)
                        event = NULL;
        }
 
-       mutex_lock(&event_mutex);
-       for_each_event(call) {
-
-               if (!call->name || !call->regfunc)
-                       continue;
-
-               if (match &&
-                   strcmp(match, call->name) != 0 &&
-                   strcmp(match, call->system) != 0)
-                       continue;
-
-               if (sub && strcmp(sub, call->system) != 0)
-                       continue;
-
-               if (event && strcmp(event, call->name) != 0)
-                       continue;
-
-               ftrace_event_enable_disable(call, set);
-
-               ret = 0;
-       }
-       mutex_unlock(&event_mutex);
+       return __ftrace_set_clr_event(match, sub, event, set);
+}
 
-       return ret;
+/**
+ * trace_set_clr_event - enable or disable an event
+ * @system: system name to match (NULL for any system)
+ * @event: event name to match (NULL for all events, within system)
+ * @set: 1 to enable, 0 to disable
+ *
+ * This is a way for other parts of the kernel to enable or disable
+ * event recording.
+ *
+ * Returns 0 on success, -EINVAL if the parameters do not match any
+ * registered events.
+ */
+int trace_set_clr_event(const char *system, const char *event, int set)
+{
+       return __ftrace_set_clr_event(NULL, system, event, set);
 }
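
A minimal usage sketch for the helper above; the "sched" / "sched_wakeup" names are only illustrative:

        /* enable a single event from elsewhere in the kernel */
        if (trace_set_clr_event("sched", "sched_wakeup", 1))
                pr_warning("could not enable the sched_wakeup event\n");

        /* later: disable every event in that subsystem */
        trace_set_clr_event("sched", NULL, 0);
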
 
 /* 128 should be much more than enough */
@@ -145,73 +249,38 @@ static ssize_t
 ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
-       size_t read = 0;
-       int i, set = 1;
-       ssize_t ret;
-       char *buf;
-       char ch;
+       struct trace_parser parser;
+       ssize_t read, ret;
 
-       if (!cnt || cnt < 0)
+       if (!cnt)
                return 0;
 
        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;
 
-       ret = get_user(ch, ubuf++);
-       if (ret)
-               return ret;
-       read++;
-       cnt--;
-
-       /* skip white space */
-       while (cnt && isspace(ch)) {
-               ret = get_user(ch, ubuf++);
-               if (ret)
-                       return ret;
-               read++;
-               cnt--;
-       }
-
-       /* Only white space found? */
-       if (isspace(ch)) {
-               file->f_pos += read;
-               ret = read;
-               return ret;
-       }
-
-       buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
-       if (!buf)
+       if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;
 
-       if (cnt > EVENT_BUF_SIZE)
-               cnt = EVENT_BUF_SIZE;
+       read = trace_get_user(&parser, ubuf, cnt, ppos);
+
+       if (read >= 0 && trace_parser_loaded((&parser))) {
+               int set = 1;
 
-       i = 0;
-       while (cnt && !isspace(ch)) {
-               if (!i && ch == '!')
+               if (*parser.buffer == '!')
                        set = 0;
-               else
-                       buf[i++] = ch;
 
-               ret = get_user(ch, ubuf++);
+               parser.buffer[parser.idx] = 0;
+
+               ret = ftrace_set_clr_event(parser.buffer + !set, set);
                if (ret)
-                       goto out_free;
-               read++;
-               cnt--;
+                       goto out_put;
        }
-       buf[i] = 0;
-
-       file->f_pos += read;
-
-       ret = ftrace_set_clr_event(buf, set);
-       if (ret)
-               goto out_free;
 
        ret = read;
 
- out_free:
-       kfree(buf);
+ out_put:
+       trace_parser_put(&parser);
 
        return ret;
 }
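
Assuming the usual tracing debugfs layout (the file itself is created outside this hunk), writes like the following end up in the handler above; a leading '!' clears the named event instead of setting it (event name illustrative):

        echo 'irq:irq_handler_entry'  > /sys/kernel/debug/tracing/set_event
        echo '!irq:irq_handler_entry' > /sys/kernel/debug/tracing/set_event
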
@@ -219,62 +288,67 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_call *call = m->private;
-       struct ftrace_event_call *next = call;
+       struct ftrace_event_call *call = v;
 
        (*pos)++;
 
-       for (;;) {
-               if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
-                       return NULL;
-
+       list_for_each_entry_continue(call, &ftrace_events, list) {
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->regfunc)
-                       break;
-
-               call++;
-               next = call;
+                       return call;
        }
 
-       m->private = ++next;
-
-       return call;
+       return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-       return t_next(m, NULL, pos);
+       struct ftrace_event_call *call;
+       loff_t l;
+
+       mutex_lock(&event_mutex);
+
+       call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+       for (l = 0; l <= *pos; ) {
+               call = t_next(m, call, &l);
+               if (!call)
+                       break;
+       }
+       return call;
 }
 
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct ftrace_event_call *call = m->private;
-       struct ftrace_event_call *next;
+       struct ftrace_event_call *call = v;
 
        (*pos)++;
 
- retry:
-       if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
-               return NULL;
-
-       if (!call->enabled) {
-               call++;
-               goto retry;
+       list_for_each_entry_continue(call, &ftrace_events, list) {
+               if (call->enabled)
+                       return call;
        }
 
-       next = call;
-       m->private = ++next;
-
-       return call;
+       return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-       return s_next(m, NULL, pos);
+       struct ftrace_event_call *call;
+       loff_t l;
+
+       mutex_lock(&event_mutex);
+
+       call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+       for (l = 0; l <= *pos; ) {
+               call = s_next(m, call, &l);
+               if (!call)
+                       break;
+       }
+       return call;
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -290,26 +364,20 @@ static int t_show(struct seq_file *m, void *v)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+       mutex_unlock(&event_mutex);
 }
 
 static int
 ftrace_event_seq_open(struct inode *inode, struct file *file)
 {
-       int ret;
        const struct seq_operations *seq_ops;
 
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND))
+           (file->f_flags & O_TRUNC))
                ftrace_clear_events();
 
        seq_ops = inode->i_private;
-       ret = seq_open(file, seq_ops);
-       if (!ret) {
-               struct seq_file *m = file->private_data;
-
-               m->private = __start_ftrace_events;
-       }
-       return ret;
+       return seq_open(file, seq_ops);
 }
 
 static ssize_t
@@ -356,7 +424,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
        case 0:
        case 1:
                mutex_lock(&event_mutex);
-               ftrace_event_enable_disable(call, val);
+               ret = ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;
 
@@ -366,31 +434,89 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
        *ppos += cnt;
 
-       return cnt;
+       return ret ? ret : cnt;
 }
 
-#undef FIELD
-#define FIELD(type, name)                                              \
-       #type, "common_" #name, offsetof(typeof(field), name),          \
-               sizeof(field.name)
+static ssize_t
+system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+                  loff_t *ppos)
+{
+       const char set_to_char[4] = { '?', '0', '1', 'X' };
+       const char *system = filp->private_data;
+       struct ftrace_event_call *call;
+       char buf[2];
+       int set = 0;
+       int ret;
+
+       mutex_lock(&event_mutex);
+       list_for_each_entry(call, &ftrace_events, list) {
+               if (!call->name || !call->regfunc)
+                       continue;
+
+               if (system && strcmp(call->system, system) != 0)
+                       continue;
+
+               /*
+                * We need to find out if all the events are set
+                * or if all the events are cleared, or if we have
+                * a mixture.
+                */
+               set |= (1 << !!call->enabled);
+
+               /*
+                * If we have a mixture, no need to look further.
+                */
+               if (set == 3)
+                       break;
+       }
+       mutex_unlock(&event_mutex);
+
+       buf[0] = set_to_char[set];
+       buf[1] = '\n';
+
+       ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+
+       return ret;
+}
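
The set_to_char lookup above works because every matching event ORs in either bit 0 or bit 1; spelled out:

        /*
         * set == 0  ->  '?'   no matching events were found
         * set == 1  ->  '0'   only disabled events seen (1 << !!0)
         * set == 2  ->  '1'   only enabled events seen  (1 << !!1)
         * set == 3  ->  'X'   a mixture of enabled and disabled
         */
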
 
-static int trace_write_header(struct trace_seq *s)
+static ssize_t
+system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                   loff_t *ppos)
 {
-       struct trace_entry field;
+       const char *system = filp->private_data;
+       unsigned long val;
+       char buf[64];
+       ssize_t ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+
+       if (val != 0 && val != 1)
+               return -EINVAL;
+
+       ret = __ftrace_set_clr_event(NULL, system, NULL, val);
+       if (ret)
+               goto out;
+
+       ret = cnt;
+
+out:
+       *ppos += cnt;
 
-       /* struct trace_entry */
-       return trace_seq_printf(s,
-                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-                               "\n",
-                               FIELD(unsigned char, type),
-                               FIELD(unsigned char, flags),
-                               FIELD(unsigned char, preempt_count),
-                               FIELD(int, pid),
-                               FIELD(int, tgid));
+       return ret;
 }
 
 static ssize_t
@@ -398,9 +524,11 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
 {
        struct ftrace_event_call *call = filp->private_data;
+       struct ftrace_event_field *field;
        struct trace_seq *s;
+       int common_field_count = 5;
        char *buf;
-       int r;
+       int r = 0;
 
        if (*ppos)
                return 0;
@@ -411,14 +539,48 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 
        trace_seq_init(s);
 
-       /* If any of the first writes fail, so will the show_format. */
-
        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
-       trace_write_header(s);
 
-       r = call->show_format(s);
+       list_for_each_entry_reverse(field, &call->fields, link) {
+               /*
+                * Smartly shows the array type(except dynamic array).
+                * Normal:
+                *      field:TYPE VAR
+                * If TYPE := TYPE[LEN], it is shown:
+                *      field:TYPE VAR[LEN]
+                */
+               const char *array_descriptor = strchr(field->type, '[');
+
+               if (!strncmp(field->type, "__data_loc", 10))
+                       array_descriptor = NULL;
+
+               if (!array_descriptor) {
+                       r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
+                                       "\tsize:%u;\tsigned:%d;\n",
+                                       field->type, field->name, field->offset,
+                                       field->size, !!field->is_signed);
+               } else {
+                       r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
+                                       "\tsize:%u;\tsigned:%d;\n",
+                                       (int)(array_descriptor - field->type),
+                                       field->type, field->name,
+                                       array_descriptor, field->offset,
+                                       field->size, !!field->is_signed);
+               }
+
+               if (--common_field_count == 0)
+                       r = trace_seq_printf(s, "\n");
+
+               if (!r)
+                       break;
+       }
+
+       if (r)
+               r = trace_seq_printf(s, "\nprint fmt: %s\n",
+                               call->print_fmt);
+
        if (!r) {
                /*
                 * ug!  The format output is bigger than a PAGE!!
@@ -476,8 +638,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 
        trace_seq_init(s);
 
-       r = filter_print_preds(call->preds, s->buffer);
-       r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, r);
+       print_event_filter(call, s);
+       r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
        kfree(s);
 
@@ -489,42 +651,113 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
        struct ftrace_event_call *call = filp->private_data;
-       char buf[64], *pbuf = buf;
-       struct filter_pred *pred;
+       char *buf;
        int err;
 
-       if (cnt >= sizeof(buf))
+       if (cnt >= PAGE_SIZE)
                return -EINVAL;
 
-       if (copy_from_user(&buf, ubuf, cnt))
+       buf = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!buf)
+               return -ENOMEM;
+
+       if (copy_from_user(buf, ubuf, cnt)) {
+               free_page((unsigned long) buf);
                return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       err = apply_event_filter(call, buf);
+       free_page((unsigned long) buf);
+       if (err < 0)
+               return err;
+
+       *ppos += cnt;
+
+       return cnt;
+}
 
-       pred = kzalloc(sizeof(*pred), GFP_KERNEL);
-       if (!pred)
+static ssize_t
+subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+                     loff_t *ppos)
+{
+       struct event_subsystem *system = filp->private_data;
+       struct trace_seq *s;
+       int r;
+
+       if (*ppos)
+               return 0;
+
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
                return -ENOMEM;
 
-       err = filter_parse(&pbuf, pred);
-       if (err < 0) {
-               filter_free_pred(pred);
-               return err;
-       }
+       trace_seq_init(s);
 
-       if (pred->clear) {
-               filter_free_preds(call);
-               return cnt;
-       }
+       print_subsystem_event_filter(system, s);
+       r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+       kfree(s);
 
-       if (filter_add_pred(call, pred)) {
-               filter_free_pred(pred);
+       return r;
+}
+
+static ssize_t
+subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                      loff_t *ppos)
+{
+       struct event_subsystem *system = filp->private_data;
+       char *buf;
+       int err;
+
+       if (cnt >= PAGE_SIZE)
                return -EINVAL;
+
+       buf = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!buf)
+               return -ENOMEM;
+
+       if (copy_from_user(buf, ubuf, cnt)) {
+               free_page((unsigned long) buf);
+               return -EFAULT;
        }
+       buf[cnt] = '\0';
+
+       err = apply_subsystem_event_filter(system, buf);
+       free_page((unsigned long) buf);
+       if (err < 0)
+               return err;
 
        *ppos += cnt;
 
        return cnt;
 }
 
-static const struct seq_operations show_event_seq_ops = {
+static ssize_t
+show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       int (*func)(struct trace_seq *s) = filp->private_data;
+       struct trace_seq *s;
+       int r;
+
+       if (*ppos)
+               return 0;
+
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       trace_seq_init(s);
+
+       func(s);
+       r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+       kfree(s);
+
+       return r;
+}
+
+static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
@@ -575,6 +808,23 @@ static const struct file_operations ftrace_event_filter_fops = {
        .write = event_filter_write,
 };
 
+static const struct file_operations ftrace_subsystem_filter_fops = {
+       .open = tracing_open_generic,
+       .read = subsystem_filter_read,
+       .write = subsystem_filter_write,
+};
+
+static const struct file_operations ftrace_system_enable_fops = {
+       .open = tracing_open_generic,
+       .read = system_enable_read,
+       .write = system_enable_write,
+};
+
+static const struct file_operations ftrace_show_header_fops = {
+       .open = tracing_open_generic,
+       .read = show_header,
+};
+
 static struct dentry *event_trace_events_dir(void)
 {
        static struct dentry *d_tracer;
@@ -595,23 +845,20 @@ static struct dentry *event_trace_events_dir(void)
        return d_events;
 }
 
-struct event_subsystem {
-       struct list_head        list;
-       const char              *name;
-       struct dentry           *entry;
-};
-
 static LIST_HEAD(event_subsystems);
 
 static struct dentry *
 event_subsystem_dir(const char *name, struct dentry *d_events)
 {
        struct event_subsystem *system;
+       struct dentry *entry;
 
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
-               if (strcmp(system->name, name) == 0)
+               if (strcmp(system->name, name) == 0) {
+                       system->nr_events++;
                        return system->entry;
+               }
        }
 
        /* need to create new entry */
@@ -630,34 +877,57 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
                return d_events;
        }
 
-       system->name = name;
+       system->nr_events = 1;
+       system->name = kstrdup(name, GFP_KERNEL);
+       if (!system->name) {
+               debugfs_remove(system->entry);
+               kfree(system);
+               return d_events;
+       }
+
        list_add(&system->list, &event_subsystems);
 
+       system->filter = NULL;
+
+       system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
+       if (!system->filter) {
+               pr_warning("Could not allocate filter for subsystem "
+                          "'%s'\n", name);
+               return system->entry;
+       }
+
+       entry = debugfs_create_file("filter", 0644, system->entry, system,
+                                   &ftrace_subsystem_filter_fops);
+       if (!entry) {
+               kfree(system->filter);
+               system->filter = NULL;
+               pr_warning("Could not create debugfs "
+                          "'%s/filter' entry\n", name);
+       }
+
+       trace_create_file("enable", 0644, system->entry,
+                         (void *)system->name,
+                         &ftrace_system_enable_fops);
+
        return system->entry;
 }
 
 static int
-event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
+event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
+                const struct file_operations *id,
+                const struct file_operations *enable,
+                const struct file_operations *filter,
+                const struct file_operations *format)
 {
-       struct dentry *entry;
        int ret;
 
        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
-       if (strcmp(call->system, "TRACE_SYSTEM") != 0)
+       if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);
 
-       if (call->raw_init) {
-               ret = call->raw_init();
-               if (ret < 0) {
-                       pr_warning("Could not initialize trace point"
-                                  " events/%s\n", call->name);
-                       return ret;
-               }
-       }
-
        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
@@ -665,56 +935,318 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
                return -1;
        }
 
-       if (call->regfunc) {
-               entry = debugfs_create_file("enable", 0644, call->dir, call,
-                                           &ftrace_enable_fops);
-               if (!entry)
-                       pr_warning("Could not create debugfs "
-                                  "'%s/enable' entry\n", call->name);
-       }
+       if (call->regfunc)
+               trace_create_file("enable", 0644, call->dir, call,
+                                 enable);
 
-       if (call->id) {
-               entry = debugfs_create_file("id", 0444, call->dir, call,
-                               &ftrace_event_id_fops);
-               if (!entry)
-                       pr_warning("Could not create debugfs '%s/id' entry\n",
-                                       call->name);
-       }
+       if (call->id && call->perf_event_enable)
+               trace_create_file("id", 0444, call->dir, call,
+                                 id);
 
        if (call->define_fields) {
-               ret = call->define_fields();
+               ret = trace_define_common_fields(call);
+               if (!ret)
+                       ret = call->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
+               trace_create_file("filter", 0644, call->dir, call,
+                                 filter);
        }
 
-       entry = debugfs_create_file("filter", 0444, call->dir, call,
-                                   &ftrace_event_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'%s/filter' entry\n", call->name);
+       trace_create_file("format", 0444, call->dir, call,
+                         format);
 
-       /* A trace may not want to export its format */
-       if (!call->show_format)
-               return 0;
+       return 0;
+}
 
-       entry = debugfs_create_file("format", 0444, call->dir, call,
-                                   &ftrace_event_format_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'%s/format' entry\n", call->name);
+static int __trace_add_event_call(struct ftrace_event_call *call)
+{
+       struct dentry *d_events;
+       int ret;
+
+       if (!call->name)
+               return -EINVAL;
+
+       if (call->raw_init) {
+               ret = call->raw_init(call);
+               if (ret < 0) {
+                       if (ret != -ENOSYS)
+                               pr_warning("Could not initialize trace "
+                               "events/%s\n", call->name);
+                       return ret;
+               }
+       }
+
+       d_events = event_trace_events_dir();
+       if (!d_events)
+               return -ENOENT;
+
+       ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
+                               &ftrace_enable_fops, &ftrace_event_filter_fops,
+                               &ftrace_event_format_fops);
+       if (!ret)
+               list_add(&call->list, &ftrace_events);
+
+       return ret;
+}
+
+/* Add an additional event_call dynamically */
+int trace_add_event_call(struct ftrace_event_call *call)
+{
+       int ret;
+       mutex_lock(&event_mutex);
+       ret = __trace_add_event_call(call);
+       mutex_unlock(&event_mutex);
+       return ret;
+}
+
+static void remove_subsystem_dir(const char *name)
+{
+       struct event_subsystem *system;
+
+       if (strcmp(name, TRACE_SYSTEM) == 0)
+               return;
+
+       list_for_each_entry(system, &event_subsystems, list) {
+               if (strcmp(system->name, name) == 0) {
+                       if (!--system->nr_events) {
+                               struct event_filter *filter = system->filter;
+
+                               debugfs_remove_recursive(system->entry);
+                               list_del(&system->list);
+                               if (filter) {
+                                       kfree(filter->filter_string);
+                                       kfree(filter);
+                               }
+                               kfree(system->name);
+                               kfree(system);
+                       }
+                       break;
+               }
+       }
+}
+
+/*
+ * Must be called with both event_mutex and trace_event_mutex held.
+ */
+static void __trace_remove_event_call(struct ftrace_event_call *call)
+{
+       ftrace_event_enable_disable(call, 0);
+       if (call->event)
+               __unregister_ftrace_event(call->event);
+       debugfs_remove_recursive(call->dir);
+       list_del(&call->list);
+       trace_destroy_fields(call);
+       destroy_preds(call);
+       remove_subsystem_dir(call->system);
+}
+
+/* Remove an event_call */
+void trace_remove_event_call(struct ftrace_event_call *call)
+{
+       mutex_lock(&event_mutex);
+       down_write(&trace_event_mutex);
+       __trace_remove_event_call(call);
+       up_write(&trace_event_mutex);
+       mutex_unlock(&event_mutex);
+}
+
+#define for_each_event(event, start, end)                      \
+       for (event = start;                                     \
+            (unsigned long)event < (unsigned long)end;         \
+            event++)
 
+#ifdef CONFIG_MODULES
+
+static LIST_HEAD(ftrace_module_file_list);
+
+/*
+ * Modules must own their file_operations to keep up with
+ * reference counting.
+ */
+struct ftrace_module_file_ops {
+       struct list_head                list;
+       struct module                   *mod;
+       struct file_operations          id;
+       struct file_operations          enable;
+       struct file_operations          format;
+       struct file_operations          filter;
+};
+
+static struct ftrace_module_file_ops *
+trace_create_file_ops(struct module *mod)
+{
+       struct ftrace_module_file_ops *file_ops;
+
+       /*
+        * This is a bit of a PITA. To allow for correct reference
+        * counting, modules must "own" their file_operations.
+        * To do this, we allocate the file operations that will be
+        * used in the event directory.
+        */
+
+       file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
+       if (!file_ops)
+               return NULL;
+
+       file_ops->mod = mod;
+
+       file_ops->id = ftrace_event_id_fops;
+       file_ops->id.owner = mod;
+
+       file_ops->enable = ftrace_enable_fops;
+       file_ops->enable.owner = mod;
+
+       file_ops->filter = ftrace_event_filter_fops;
+       file_ops->filter.owner = mod;
+
+       file_ops->format = ftrace_event_format_fops;
+       file_ops->format.owner = mod;
+
+       list_add(&file_ops->list, &ftrace_module_file_list);
+
+       return file_ops;
+}
+
+static void trace_module_add_events(struct module *mod)
+{
+       struct ftrace_module_file_ops *file_ops = NULL;
+       struct ftrace_event_call *call, *start, *end;
+       struct dentry *d_events;
+       int ret;
+
+       start = mod->trace_events;
+       end = mod->trace_events + mod->num_trace_events;
+
+       if (start == end)
+               return;
+
+       d_events = event_trace_events_dir();
+       if (!d_events)
+               return;
+
+       for_each_event(call, start, end) {
+               /* The linker may leave blanks */
+               if (!call->name)
+                       continue;
+               if (call->raw_init) {
+                       ret = call->raw_init(call);
+                       if (ret < 0) {
+                               if (ret != -ENOSYS)
+                                       pr_warning("Could not initialize trace "
+                                       "point events/%s\n", call->name);
+                               continue;
+                       }
+               }
+               /*
+                * This module has events, create file ops for this module
+                * if not already done.
+                */
+               if (!file_ops) {
+                       file_ops = trace_create_file_ops(mod);
+                       if (!file_ops)
+                               return;
+               }
+               call->mod = mod;
+               ret = event_create_dir(call, d_events,
+                                      &file_ops->id, &file_ops->enable,
+                                      &file_ops->filter, &file_ops->format);
+               if (!ret)
+                       list_add(&call->list, &ftrace_events);
+       }
+}
+
+static void trace_module_remove_events(struct module *mod)
+{
+       struct ftrace_module_file_ops *file_ops;
+       struct ftrace_event_call *call, *p;
+       bool found = false;
+
+       down_write(&trace_event_mutex);
+       list_for_each_entry_safe(call, p, &ftrace_events, list) {
+               if (call->mod == mod) {
+                       found = true;
+                       __trace_remove_event_call(call);
+               }
+       }
+
+       /* Now free the file_operations */
+       list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
+               if (file_ops->mod == mod)
+                       break;
+       }
+       if (&file_ops->list != &ftrace_module_file_list) {
+               list_del(&file_ops->list);
+               kfree(file_ops);
+       }
+
+       /*
+        * It is safest to reset the ring buffer if the module being unloaded
+        * registered any events.
+        */
+       if (found)
+               tracing_reset_current_online_cpus();
+       up_write(&trace_event_mutex);
+}
+
+static int trace_module_notify(struct notifier_block *self,
+                              unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       mutex_lock(&event_mutex);
+       switch (val) {
+       case MODULE_STATE_COMING:
+               trace_module_add_events(mod);
+               break;
+       case MODULE_STATE_GOING:
+               trace_module_remove_events(mod);
+               break;
+       }
+       mutex_unlock(&event_mutex);
+
+       return 0;
+}
+#else
+static int trace_module_notify(struct notifier_block *self,
+                              unsigned long val, void *data)
+{
        return 0;
 }
+#endif /* CONFIG_MODULES */
+
+static struct notifier_block trace_module_nb = {
+       .notifier_call = trace_module_notify,
+       .priority = 0,
+};
+
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
+static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
+
+static __init int setup_trace_event(char *str)
+{
+       strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+       ring_buffer_expanded = 1;
+       tracing_selftest_disabled = 1;
+
+       return 1;
+}
+__setup("trace_event=", setup_trace_event);
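
Together with the strsep(&buf, ",") loop in event_trace_init() below, this accepts a comma-separated list on the kernel command line; each token is handed to ftrace_set_clr_event(), so both <subsystem>:<event> and bare subsystem or event names work. An illustrative boot parameter (event names hypothetical):

        trace_event=sched:sched_switch,irq
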
 
 static __init int event_trace_init(void)
 {
-       struct ftrace_event_call *call = __start_ftrace_events;
+       struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
+       int ret;
+       char *buf = bootup_event_buf;
+       char *token;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -738,13 +1270,281 @@ static __init int event_trace_init(void)
        if (!d_events)
                return 0;
 
-       for_each_event(call) {
+       /* ring buffer internal formats */
+       trace_create_file("header_page", 0444, d_events,
+                         ring_buffer_print_page_header,
+                         &ftrace_show_header_fops);
+
+       trace_create_file("header_event", 0444, d_events,
+                         ring_buffer_print_entry_header,
+                         &ftrace_show_header_fops);
+
+       trace_create_file("enable", 0644, d_events,
+                         NULL, &ftrace_system_enable_fops);
+
+       for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
-               event_create_dir(call, d_events);
+               if (call->raw_init) {
+                       ret = call->raw_init(call);
+                       if (ret < 0) {
+                               if (ret != -ENOSYS)
+                                       pr_warning("Could not initialize trace "
+                                       "point events/%s\n", call->name);
+                               continue;
+                       }
+               }
+               ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
+                                      &ftrace_enable_fops,
+                                      &ftrace_event_filter_fops,
+                                      &ftrace_event_format_fops);
+               if (!ret)
+                       list_add(&call->list, &ftrace_events);
+       }
+
+       while (true) {
+               token = strsep(&buf, ",");
+
+               if (!token)
+                       break;
+               if (!*token)
+                       continue;
+
+               ret = ftrace_set_clr_event(token, 1);
+               if (ret)
+                       pr_warning("Failed to enable trace event: %s\n", token);
        }
 
+       ret = register_module_notifier(&trace_module_nb);
+       if (ret)
+               pr_warning("Failed to register trace events module notifier\n");
+
        return 0;
 }
 fs_initcall(event_trace_init);
+
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+
+static DEFINE_SPINLOCK(test_spinlock);
+static DEFINE_SPINLOCK(test_spinlock_irq);
+static DEFINE_MUTEX(test_mutex);
+
+static __init void test_work(struct work_struct *dummy)
+{
+       spin_lock(&test_spinlock);
+       spin_lock_irq(&test_spinlock_irq);
+       udelay(1);
+       spin_unlock_irq(&test_spinlock_irq);
+       spin_unlock(&test_spinlock);
+
+       mutex_lock(&test_mutex);
+       msleep(1);
+       mutex_unlock(&test_mutex);
+}
+
+static __init int event_test_thread(void *unused)
+{
+       void *test_malloc;
+
+       test_malloc = kmalloc(1234, GFP_KERNEL);
+       if (!test_malloc)
+               pr_info("failed to kmalloc\n");
+
+       schedule_on_each_cpu(test_work);
+
+       kfree(test_malloc);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop())
+               schedule();
+
+       return 0;
+}
+
+/*
+ * Do various things that may trigger events.
+ */
+static __init void event_test_stuff(void)
+{
+       struct task_struct *test_thread;
+
+       test_thread = kthread_run(event_test_thread, NULL, "test-events");
+       msleep(1);
+       kthread_stop(test_thread);
+}
+
+/*
+ * For every trace event defined, we will test each trace point separately,
+ * and then by groups, and finally all trace points.
+ */
+static __init void event_trace_self_tests(void)
+{
+       struct ftrace_event_call *call;
+       struct event_subsystem *system;
+       int ret;
+
+       pr_info("Running tests on trace events:\n");
+
+       list_for_each_entry(call, &ftrace_events, list) {
+
+               /* Only test those that have a regfunc */
+               if (!call->regfunc)
+                       continue;
+
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+               if (call->system &&
+                   strcmp(call->system, "syscalls") == 0)
+                       continue;
+#endif
+
+               pr_info("Testing event %s: ", call->name);
+
+               /*
+                * If an event is already enabled, someone is using
+                * it and the self test should not be on.
+                */
+               if (call->enabled) {
+                       pr_warning("Enabled event during self test!\n");
+                       WARN_ON_ONCE(1);
+                       continue;
+               }
+
+               ftrace_event_enable_disable(call, 1);
+               event_test_stuff();
+               ftrace_event_enable_disable(call, 0);
+
+               pr_cont("OK\n");
+       }
+
+       /* Now test at the sub system level */
+
+       pr_info("Running tests on trace event systems:\n");
+
+       list_for_each_entry(system, &event_subsystems, list) {
+
+               /* the ftrace system is special, skip it */
+               if (strcmp(system->name, "ftrace") == 0)
+                       continue;
+
+               pr_info("Testing event system %s: ", system->name);
+
+               ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
+               if (WARN_ON_ONCE(ret)) {
+                       pr_warning("error enabling system %s\n",
+                                  system->name);
+                       continue;
+               }
+
+               event_test_stuff();
+
+               ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
+               if (WARN_ON_ONCE(ret))
+                       pr_warning("error disabling system %s\n",
+                                  system->name);
+
+               pr_cont("OK\n");
+       }
+
+       /* Test with all events enabled */
+
+       pr_info("Running tests on all trace events:\n");
+       pr_info("Testing all events: ");
+
+       ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
+       if (WARN_ON_ONCE(ret)) {
+               pr_warning("error enabling all events\n");
+               return;
+       }
+
+       event_test_stuff();
+
+       /* reset sysname */
+       ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
+       if (WARN_ON_ONCE(ret)) {
+               pr_warning("error disabling all events\n");
+               return;
+       }
+
+       pr_cont("OK\n");
+}
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
+
+static void
+function_test_events_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct ftrace_entry *entry;
+       unsigned long flags;
+       long disabled;
+       int resched;
+       int cpu;
+       int pc;
+
+       pc = preempt_count();
+       resched = ftrace_preempt_disable();
+       cpu = raw_smp_processor_id();
+       disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
+
+       if (disabled != 1)
+               goto out;
+
+       local_save_flags(flags);
+
+       event = trace_current_buffer_lock_reserve(&buffer,
+                                                 TRACE_FN, sizeof(*entry),
+                                                 flags, pc);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       entry->ip                       = ip;
+       entry->parent_ip                = parent_ip;
+
+       trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+
+ out:
+       atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
+       ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_ops __initdata  =
+{
+       .func = function_test_events_call,
+};
+
+static __init void event_trace_self_test_with_function(void)
+{
+       register_ftrace_function(&trace_ops);
+       pr_info("Running tests again, along with the function tracer\n");
+       event_trace_self_tests();
+       unregister_ftrace_function(&trace_ops);
+}
+#else
+static __init void event_trace_self_test_with_function(void)
+{
+}
+#endif
+
+static __init int event_trace_self_tests_init(void)
+{
+       if (!tracing_selftest_disabled) {
+               event_trace_self_tests();
+               event_trace_self_test_with_function();
+       }
+
+       return 0;
+}
+
+late_initcall(event_trace_self_tests_init);
+
+#endif