sysctl: remove "struct file *" argument of ->proc_handler
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c7f4a4b..a142579 100644
 #include <linux/list.h>
 #include <linux/hash.h>
 
-#include <trace/sched.h>
+#include <trace/events/sched.h>
 
 #include <asm/ftrace.h>
+#include <asm/setup.h>
 
 #include "trace_output.h"
 #include "trace_stat.h"
@@ -290,7 +291,9 @@ function_stat_next(void *v, int idx)
        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
  again:
-       rec++;
+       if (idx != 0)
+               rec++;
+
        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
@@ -598,7 +601,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
        local_irq_save(flags);
 
        stat = &__get_cpu_var(ftrace_profile_stats);
-       if (!stat->hash)
+       if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
        rec = ftrace_find_profiled_func(stat, ip);
@@ -629,7 +632,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 
        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
-       if (!stat->hash)
+       if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
        calltime = trace->rettime - trace->calltime;
@@ -723,6 +726,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
+                       /*
+                        * unregister_ftrace_profiler() calls stop_machine(),
+                        * so this acts like a synchronize_sched().
+                        */
                        unregister_ftrace_profiler();
                }
        }
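
The comment above leans on a standard kernel pattern: readers sample the enable flag with interrupts disabled, and the unregister path runs stop_machine(), which cannot complete while any CPU is still inside such a section. A simplified, kernel-style sketch of that pattern (helper names are invented, not the real ftrace symbols):

	static int profile_enabled;

	static void profile_hit(unsigned long ip)	/* reader, cf. function_profile_call() */
	{
		unsigned long flags;

		local_irq_save(flags);
		if (profile_enabled)
			record_hit(ip);			/* hypothetical helper */
		local_irq_restore(flags);
	}

	static void profile_disable(void)		/* writer, cf. the write("0") path */
	{
		profile_enabled = 0;
		/*
		 * unregister_profiler() (hypothetical) runs stop_machine();
		 * once it returns, no CPU can still be inside the IRQ-off
		 * section above having seen the old value -- the same
		 * guarantee synchronize_sched() would give.
		 */
		unregister_profiler();
	}
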
@@ -761,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
        .stat_show      = function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
@@ -779,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
                         * The files created are permanent; if something happens,
                         * we still do not free memory.
                         */
-                       kfree(stat);
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
@@ -806,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
@@ -916,25 +922,6 @@ static void ftrace_free_rec(struct dyn_ftrace *rec)
        rec->flags |= FTRACE_FL_FREE;
 }
 
-void ftrace_release(void *start, unsigned long size)
-{
-       struct dyn_ftrace *rec;
-       struct ftrace_page *pg;
-       unsigned long s = (unsigned long)start;
-       unsigned long e = s + size;
-
-       if (ftrace_disabled || !start)
-               return;
-
-       mutex_lock(&ftrace_lock);
-       do_for_each_ftrace_rec(pg, rec) {
-               if ((rec->ip >= s) && (rec->ip < e) &&
-                   !(rec->flags & FTRACE_FL_FREE))
-                       ftrace_free_rec(rec);
-       } while_for_each_ftrace_rec();
-       mutex_unlock(&ftrace_lock);
-}
-
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
        struct dyn_ftrace *rec;
@@ -1029,71 +1016,35 @@ static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
        unsigned long ftrace_addr;
-       unsigned long ip, fl;
+       unsigned long flag = 0UL;
 
        ftrace_addr = (unsigned long)FTRACE_ADDR;
 
-       ip = rec->ip;
-
        /*
-        * If this record is not to be traced and
-        * it is not enabled then do nothing.
+        * If this record is not to be traced or we want to disable it,
+        * then disable it.
         *
-        * If this record is not to be traced and
-        * it is enabled then disable it.
+        * If we want to enable it and filtering is off, then enable it.
         *
+        * If we want to enable it and filtering is on, enable it only if
+        * it's filtered.
         */
-       if (rec->flags & FTRACE_FL_NOTRACE) {
-               if (rec->flags & FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-                       return 0;
-
-       } else if (ftrace_filtered && enable) {
-               /*
-                * Filtering is on:
-                */
-
-               fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
-               /* Record is filtered and enabled, do nothing */
-               if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
-                       return 0;
-
-               /* Record is not filtered or enabled, do nothing */
-               if (!fl)
-                       return 0;
-
-               /* Record is not filtered but enabled, disable it */
-               if (fl == FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-               /* Otherwise record is filtered but not enabled, enable it */
-                       rec->flags |= FTRACE_FL_ENABLED;
-       } else {
-               /* Disable or not filtered */
-
-               if (enable) {
-                       /* if record is enabled, do nothing */
-                       if (rec->flags & FTRACE_FL_ENABLED)
-                               return 0;
-
-                       rec->flags |= FTRACE_FL_ENABLED;
-
-               } else {
+       if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+               if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+                       flag = FTRACE_FL_ENABLED;
+       }
 
-                       /* if record is not enabled, do nothing */
-                       if (!(rec->flags & FTRACE_FL_ENABLED))
-                               return 0;
+       /* If the state of this record hasn't changed, then do nothing */
+       if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+               return 0;
 
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               }
+       if (flag) {
+               rec->flags |= FTRACE_FL_ENABLED;
+               return ftrace_make_call(rec, ftrace_addr);
        }
 
-       if (rec->flags & FTRACE_FL_ENABLED)
-               return ftrace_make_call(rec, ftrace_addr);
-       else
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+       rec->flags &= ~FTRACE_FL_ENABLED;
+       return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
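
The rewritten __ftrace_replace_code() reduces the old branch ladder to a single question: what should the FTRACE_FL_ENABLED bit be, and did it change? A self-contained sketch of the same decision (flag values chosen for illustration only, not the kernel's):

	#define FL_ENABLED	(1 << 0)	/* illustrative values */
	#define FL_FILTER	(1 << 1)
	#define FL_NOTRACE	(1 << 2)

	/* Desired FL_ENABLED state for one record. */
	static unsigned long desired_state(unsigned long flags, int enable, int filtered)
	{
		if (enable && !(flags & FL_NOTRACE) &&
		    (!filtered || (flags & FL_FILTER)))
			return FL_ENABLED;
		return 0UL;
	}

If the returned value already matches the record's FTRACE_FL_ENABLED bit there is nothing to do; otherwise the bit is flipped and the call site is patched with ftrace_make_call() or ftrace_make_nop(), exactly as in the hunk above.
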
@@ -1238,6 +1189,13 @@ static void ftrace_shutdown(int command)
                return;
 
        ftrace_start_up--;
+       /*
+        * Just warn in case of an imbalance; no need to kill ftrace, it's not
+        * critical, but the ftrace_call callers may never be nopped again
+        * after further ftrace uses.
+        */
+       WARN_ON_ONCE(ftrace_start_up < 0);
+
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
@@ -1365,11 +1323,10 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 enum {
        FTRACE_ITER_FILTER      = (1 << 0),
-       FTRACE_ITER_CONT        = (1 << 1),
-       FTRACE_ITER_NOTRACE     = (1 << 2),
-       FTRACE_ITER_FAILURES    = (1 << 3),
-       FTRACE_ITER_PRINTALL    = (1 << 4),
-       FTRACE_ITER_HASH        = (1 << 5),
+       FTRACE_ITER_NOTRACE     = (1 << 1),
+       FTRACE_ITER_FAILURES    = (1 << 2),
+       FTRACE_ITER_PRINTALL    = (1 << 3),
+       FTRACE_ITER_HASH        = (1 << 4),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
@@ -1379,9 +1336,7 @@ struct ftrace_iterator {
        int                     hidx;
        int                     idx;
        unsigned                flags;
-       unsigned char           buffer[FTRACE_BUFF_MAX+1];
-       unsigned                buffer_idx;
-       unsigned                filtered;
+       struct trace_parser     parser;
 };
 
 static void *
@@ -1424,28 +1379,33 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
+
+       if (!(iter->flags & FTRACE_ITER_HASH))
+               *pos = 0;
 
        iter->flags |= FTRACE_ITER_HASH;
 
-       return t_hash_next(m, p, pos);
+       iter->hidx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_hash_next(m, p, &l);
+               if (!p)
+                       break;
+       }
+       return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
 {
        struct ftrace_func_probe *rec;
        struct hlist_node *hnd = v;
-       char str[KSYM_SYMBOL_LEN];
 
        rec = hlist_entry(hnd, struct ftrace_func_probe, node);
 
        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
 
-       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-       seq_printf(m, "%s:", str);
-
-       kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
-       seq_printf(m, "%s", str);
+       seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
 
        if (rec->data)
                seq_printf(m, ":%p", rec->data);
@@ -1474,8 +1434,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
-               } else {
-                       iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
@@ -1504,6 +1462,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
 
        mutex_lock(&ftrace_lock);
        /*
@@ -1515,23 +1474,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
-               (*pos)++;
                return iter;
        }
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);
 
-       if (*pos > 0) {
-               if (iter->idx < 0)
-                       return p;
-               (*pos)--;
-               iter->idx--;
+       iter->pg = ftrace_pages_start;
+       iter->idx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_next(m, p, &l);
+               if (!p)
+                       break;
        }
 
-       p = t_next(m, p, pos);
-
-       if (!p)
+       if (!p && iter->flags & FTRACE_ITER_FILTER)
                return t_hash_start(m, pos);
 
        return p;
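
Both t_hash_start() and t_start() now follow the usual seq_file convention: ignore any stale iterator state, reset, and replay ->next() from zero until *pos is reached. A stripped-down userspace analogue of that replay over a plain array (loff_t simplified to long long):

	#include <stddef.h>

	static const char *items[] = { "alpha", "beta", "gamma" };
	#define NITEMS (sizeof(items) / sizeof(items[0]))

	/* ->next(): return the item at *pos and advance it, or NULL past the end. */
	static const char *demo_next(long long *pos)
	{
		size_t idx = (size_t)*pos;

		(*pos)++;
		return idx < NITEMS ? items[idx] : NULL;
	}

	/* ->start(): replay from the beginning up to the requested position. */
	static const char *demo_start(long long *pos)
	{
		const char *p = NULL;
		long long l;

		for (l = 0; l <= *pos; ) {
			p = demo_next(&l);
			if (!p)
				break;
		}
		return p;
	}

Replaying from zero keeps ->start() correct for any *pos the seq_file core hands it, at the cost of an O(pos) walk.
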
@@ -1546,7 +1503,6 @@ static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
-       char str[KSYM_SYMBOL_LEN];
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_show(m, v);
@@ -1559,14 +1515,12 @@ static int t_show(struct seq_file *m, void *v)
        if (!rec)
                return 0;
 
-       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
-       seq_printf(m, "%s\n", str);
+       seq_printf(m, "%ps\n", (void *)rec->ip);
 
        return 0;
 }
 
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
@@ -1600,17 +1554,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
        return ret;
 }
 
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = (struct seq_file *)file->private_data;
-       struct ftrace_iterator *iter = m->private;
-
-       seq_release(inode, file);
-       kfree(iter);
-
-       return 0;
-}
-
 static int
 ftrace_failures_open(struct inode *inode, struct file *file)
 {
@@ -1659,9 +1602,14 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
        if (!iter)
                return -ENOMEM;
 
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
+               kfree(iter);
+               return -ENOMEM;
+       }
+
        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND))
+           (file->f_flags & O_TRUNC))
                ftrace_filter_reset(enable);
 
        if (file->f_mode & FMODE_READ) {
@@ -2114,9 +2062,9 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        int i, len = 0;
        char *search;
 
-       if (glob && (strcmp(glob, "*") || !strlen(glob)))
+       if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
                glob = NULL;
-       else {
+       else if (glob) {
                int not;
 
                type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
@@ -2251,9 +2199,8 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
 {
        struct ftrace_iterator *iter;
-       char ch;
-       size_t read = 0;
-       ssize_t ret;
+       struct trace_parser *parser;
+       ssize_t ret, read;
 
        if (!cnt || cnt < 0)
                return 0;
@@ -2266,68 +2213,23 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        } else
                iter = file->private_data;
 
-       if (!*ppos) {
-               iter->flags &= ~FTRACE_ITER_CONT;
-               iter->buffer_idx = 0;
-       }
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
-               goto out;
-       read++;
-       cnt--;
-
-       if (!(iter->flags & ~FTRACE_ITER_CONT)) {
-               /* skip white space */
-               while (cnt && isspace(ch)) {
-                       ret = get_user(ch, ubuf++);
-                       if (ret)
-                               goto out;
-                       read++;
-                       cnt--;
-               }
-
-               if (isspace(ch)) {
-                       file->f_pos += read;
-                       ret = read;
-                       goto out;
-               }
+       parser = &iter->parser;
+       read = trace_get_user(parser, ubuf, cnt, ppos);
 
-               iter->buffer_idx = 0;
-       }
-
-       while (cnt && !isspace(ch)) {
-               if (iter->buffer_idx < FTRACE_BUFF_MAX)
-                       iter->buffer[iter->buffer_idx++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
+       if (trace_parser_loaded(parser) &&
+           !trace_parser_cont(parser)) {
+               ret = ftrace_process_regex(parser->buffer,
+                                          parser->idx, enable);
                if (ret)
                        goto out;
-               read++;
-               cnt--;
-       }
-
-       if (isspace(ch)) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ret = ftrace_process_regex(iter->buffer,
-                                          iter->buffer_idx, enable);
-               if (ret)
-                       goto out;
-               iter->buffer_idx = 0;
-       } else
-               iter->flags |= FTRACE_ITER_CONT;
-
 
-       file->f_pos += read;
+               trace_parser_clear(parser);
+       }
 
        ret = read;
- out:
-       mutex_unlock(&ftrace_regex_lock);
 
+       mutex_unlock(&ftrace_regex_lock);
+out:
        return ret;
 }
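
Together with the open and release hunks elsewhere in this diff, the new trace_parser has a simple lifecycle. Condensed into one place (a sketch stitched from those hunks, not verbatim kernel code):

	/* open: allocate the token buffer once per file */
	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
		return -ENOMEM;

	/* write: pull one whitespace-delimited token from userspace ... */
	read = trace_get_user(&iter->parser, ubuf, cnt, ppos);
	if (trace_parser_loaded(&iter->parser) && !trace_parser_cont(&iter->parser)) {
		/* ... and apply it as soon as it is complete */
		ftrace_process_regex(iter->parser.buffer, iter->parser.idx, enable);
		trace_parser_clear(&iter->parser);
	}

	/* release: flush a trailing token that never saw whitespace, then free */
	if (trace_parser_loaded(&iter->parser)) {
		iter->parser.buffer[iter->parser.idx] = 0;
		ftrace_match_records(iter->parser.buffer, iter->parser.idx, enable);
	}
	trace_parser_put(&iter->parser);
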
 
@@ -2388,11 +2290,51 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
        ftrace_set_regex(buf, len, reset, 0);
 }
 
+/*
+ * command line interface to allow users to set filters on boot up.
+ */
+#define FTRACE_FILTER_SIZE             COMMAND_LINE_SIZE
+static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+
+static int __init set_ftrace_notrace(char *str)
+{
+       strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_notrace=", set_ftrace_notrace);
+
+static int __init set_ftrace_filter(char *str)
+{
+       strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_filter=", set_ftrace_filter);
+
+static void __init set_ftrace_early_filter(char *buf, int enable)
+{
+       char *func;
+
+       while (buf) {
+               func = strsep(&buf, ",");
+               ftrace_set_regex(func, strlen(func), 0, enable);
+       }
+}
+
+static void __init set_ftrace_early_filters(void)
+{
+       if (ftrace_filter_buf[0])
+               set_ftrace_early_filter(ftrace_filter_buf, 1);
+       if (ftrace_notrace_buf[0])
+               set_ftrace_early_filter(ftrace_notrace_buf, 0);
+}
+
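
With these __setup() handlers the filters can be seeded from the kernel command line long before debugfs exists, e.g. (function names are only examples):

	ftrace_filter=kfree,kmem_cache_alloc ftrace_notrace=rcu_read_lock

Each comma-separated entry is fed through ftrace_set_regex() by set_ftrace_early_filter() above, so the same glob syntax as set_ftrace_filter applies.
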
 static int
 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;
+       struct trace_parser *parser;
 
        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
@@ -2402,10 +2344,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
        } else
                iter = file->private_data;
 
-       if (iter->buffer_idx) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
+       parser = &iter->parser;
+       if (trace_parser_loaded(parser)) {
+               parser->buffer[parser->idx] = 0;
+               ftrace_match_records(parser->buffer, parser->idx, enable);
        }
 
        mutex_lock(&ftrace_lock);
@@ -2413,7 +2355,9 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_lock);
 
+       trace_parser_put(parser);
        kfree(iter);
+
        mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
@@ -2434,14 +2378,14 @@ static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
 static const struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
 static const struct file_operations ftrace_filter_fops = {
@@ -2468,32 +2412,29 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
-       unsigned long *array = m->private;
-       int index = *pos;
-
-       (*pos)++;
-
-       if (index >= ftrace_graph_count)
+       if (*pos >= ftrace_graph_count)
                return NULL;
+       return &ftrace_graph_funcs[*pos];
+}
 
-       return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-       void *p = NULL;
-
        mutex_lock(&graph_lock);
 
        /* Nothing, tell g_show to print all functions are enabled */
        if (!ftrace_graph_count && !*pos)
                return (void *)1;
 
-       p = g_next(m, p, pos);
-
-       return p;
+       return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
@@ -2504,7 +2445,6 @@ static void g_stop(struct seq_file *m, void *p)
 static int g_show(struct seq_file *m, void *v)
 {
        unsigned long *ptr = v;
-       char str[KSYM_SYMBOL_LEN];
 
        if (!ptr)
                return 0;
@@ -2514,14 +2454,12 @@ static int g_show(struct seq_file *m, void *v)
                return 0;
        }
 
-       kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
-       seq_printf(m, "%s\n", str);
+       seq_printf(m, "%ps\n", (void *)*ptr);
 
        return 0;
 }
 
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
        .start = g_start,
        .next = g_next,
        .stop = g_stop,
@@ -2538,25 +2476,27 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND)) {
+           (file->f_flags & O_TRUNC)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }
+       mutex_unlock(&graph_lock);
 
-       if (file->f_mode & FMODE_READ) {
+       if (file->f_mode & FMODE_READ)
                ret = seq_open(file, &ftrace_graph_seq_ops);
-               if (!ret) {
-                       struct seq_file *m = file->private_data;
-                       m->private = ftrace_graph_funcs;
-               }
-       } else
-               file->private_data = ftrace_graph_funcs;
-       mutex_unlock(&graph_lock);
 
        return ret;
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+       if (file->f_mode & FMODE_READ)
+               seq_release(inode, file);
+       return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
        struct dyn_ftrace *rec;
@@ -2611,12 +2551,9 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
-       unsigned char buffer[FTRACE_BUFF_MAX+1];
-       unsigned long *array;
+       struct trace_parser parser;
        size_t read = 0;
        ssize_t ret;
-       int index = 0;
-       char ch;
 
        if (!cnt || cnt < 0)
                return 0;
@@ -2628,103 +2565,58 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
                goto out;
        }
 
-       if (file->f_mode & FMODE_READ) {
-               struct seq_file *m = file->private_data;
-               array = m->private;
-       } else
-               array = file->private_data;
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
+       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
+               ret = -ENOMEM;
                goto out;
-       read++;
-       cnt--;
-
-       /* skip white space */
-       while (cnt && isspace(ch)) {
-               ret = get_user(ch, ubuf++);
-               if (ret)
-                       goto out;
-               read++;
-               cnt--;
        }
 
-       if (isspace(ch)) {
-               *ppos += read;
-               ret = read;
-               goto out;
-       }
+       read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-       while (cnt && !isspace(ch)) {
-               if (index < FTRACE_BUFF_MAX)
-                       buffer[index++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
+       if (trace_parser_loaded((&parser))) {
+               parser.buffer[parser.idx] = 0;
+
+               /* we allow only one expression at a time */
+               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+                                       parser.buffer);
                if (ret)
                        goto out;
-               read++;
-               cnt--;
        }
-       buffer[index] = 0;
-
-       /* we allow only one expression at a time */
-       ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
-       if (ret)
-               goto out;
-
-       file->f_pos += read;
 
        ret = read;
  out:
+       trace_parser_put(&parser);
        mutex_unlock(&graph_lock);
 
        return ret;
 }
 
 static const struct file_operations ftrace_graph_fops = {
-       .open = ftrace_graph_open,
-       .read = seq_read,
-       .write = ftrace_graph_write,
+       .open           = ftrace_graph_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *entry;
 
-       entry = debugfs_create_file("available_filter_functions", 0444,
-                                   d_tracer, NULL, &ftrace_avail_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_filter_functions' entry\n");
+       trace_create_file("available_filter_functions", 0444,
+                       d_tracer, NULL, &ftrace_avail_fops);
 
-       entry = debugfs_create_file("failures", 0444,
-                                   d_tracer, NULL, &ftrace_failures_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'failures' entry\n");
+       trace_create_file("failures", 0444,
+                       d_tracer, NULL, &ftrace_failures_fops);
 
-       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-                                   NULL, &ftrace_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_filter' entry\n");
+       trace_create_file("set_ftrace_filter", 0644, d_tracer,
+                       NULL, &ftrace_filter_fops);
 
-       entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_notrace' entry\n");
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -2762,14 +2654,72 @@ static int ftrace_convert_nops(struct module *mod,
        return 0;
 }
 
-void ftrace_init_module(struct module *mod,
-                       unsigned long *start, unsigned long *end)
+#ifdef CONFIG_MODULES
+void ftrace_release(void *start, void *end)
+{
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+       unsigned long s = (unsigned long)start;
+       unsigned long e = (unsigned long)end;
+
+       if (ftrace_disabled || !start || start == end)
+               return;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               if ((rec->ip >= s) && (rec->ip < e)) {
+                       /*
+                        * rec->ip is changed in ftrace_free_rec()
+                        * It should not be between s and e if the record was freed.
+                        */
+                       FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+                       ftrace_free_rec(rec);
+               }
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+static void ftrace_init_module(struct module *mod,
+                              unsigned long *start, unsigned long *end)
 {
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(mod, start, end);
 }
 
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ftrace_init_module(mod, mod->ftrace_callsites,
+                                  mod->ftrace_callsites +
+                                  mod->num_ftrace_callsites);
+               break;
+       case MODULE_STATE_GOING:
+               ftrace_release(mod->ftrace_callsites,
+                              mod->ftrace_callsites +
+                              mod->num_ftrace_callsites);
+               break;
+       }
+
+       return 0;
+}
+#else
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       return 0;
+}
+#endif /* CONFIG_MODULES */
+
+struct notifier_block ftrace_module_nb = {
+       .notifier_call = ftrace_module_notify,
+       .priority = 0,
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -2801,6 +2751,12 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
+       ret = register_module_notifier(&ftrace_module_nb);
+       if (ret)
+               pr_warning("Failed to register trace ftrace module notifier\n");
+
+       set_ftrace_early_filters();
+
        return;
  failed:
        ftrace_disabled = 1;
@@ -2982,7 +2938,6 @@ static const struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *entry;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -2990,11 +2945,8 @@ static __init int ftrace_init_debugfs(void)
 
        ftrace_init_dyn_debugfs(d_tracer);
 
-       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-                                   NULL, &ftrace_pid_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_pid' entry\n");
+       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+                           NULL, &ftrace_pid_fops);
 
        ftrace_profile_debugfs(d_tracer);
 
@@ -3063,7 +3015,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-                    struct file *file, void __user *buffer, size_t *lenp,
+                    void __user *buffer, size_t *lenp,
                     loff_t *ppos)
 {
        int ret;
@@ -3073,12 +3025,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        mutex_lock(&ftrace_lock);
 
-       ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
+       ret  = proc_dointvec(table, write, buffer, lenp, ppos);
 
-       if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+       if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;
 
-       last_ftrace_enabled = ftrace_enabled;
+       last_ftrace_enabled = !!ftrace_enabled;
 
        if (ftrace_enabled) {
 
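
This hunk is the change the commit title describes: ->proc_handler and the proc_do* helpers lose their struct file * parameter, and callers pass the user buffer directly. Every other handler is converted the same way; a minimal sketch of the post-change shape (hypothetical table; era-specific ctl_table fields such as .ctl_name omitted for brevity):

	static int my_int;

	static int my_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		/* note: no struct file * between 'write' and 'buffer' any more */
		int ret = proc_dointvec(table, write, buffer, lenp, ppos);

		if (!ret && write)
			pr_info("my_int is now %d\n", my_int);
		return ret;
	}

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_int",
			.data		= &my_int,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= my_handler,
		},
		{ }
	};
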
@@ -3106,7 +3058,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -3148,12 +3100,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
-                       t->curr_ret_stack = -1;
-                       /* Make sure IRQs see the -1 first: */
-                       barrier();
-                       t->ret_stack = ret_stack_list[start++];
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
+                       t->curr_ret_stack = -1;
+                       /* Make sure the tasks see the -1 first: */
+                       smp_wmb();
+                       t->ret_stack = ret_stack_list[start++];
                }
        } while_each_thread(g, t);
 
@@ -3211,8 +3163,10 @@ static int start_graph_tracing(void)
                return -ENOMEM;
 
        /* The cpu_boot init_task->ret_stack will never be freed */
-       for_each_online_cpu(cpu)
-               ftrace_graph_init_task(idle_task(cpu));
+       for_each_online_cpu(cpu) {
+               if (!idle_task(cpu)->ret_stack)
+                       ftrace_graph_init_task(idle_task(cpu));
+       }
 
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3258,7 +3212,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        mutex_lock(&ftrace_lock);
 
        /* we currently allow only one tracer registered at a time */
-       if (atomic_read(&ftrace_graph_active)) {
+       if (ftrace_graph_active) {
                ret = -EBUSY;
                goto out;
        }
@@ -3266,10 +3220,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
-       atomic_inc(&ftrace_graph_active);
+       ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_graph_active);
+               ftrace_graph_active--;
                goto out;
        }
 
@@ -3287,31 +3241,42 @@ void unregister_ftrace_graph(void)
 {
        mutex_lock(&ftrace_lock);
 
-       atomic_dec(&ftrace_graph_active);
+       if (unlikely(!ftrace_graph_active))
+               goto out;
+
+       ftrace_graph_active--;
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
 
+ out:
        mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_graph_active)) {
-               t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+       /* Make sure we do not use the parent ret_stack */
+       t->ret_stack = NULL;
+
+       if (ftrace_graph_active) {
+               struct ftrace_ret_stack *ret_stack;
+
+               ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
-               if (!t->ret_stack)
+               if (!ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
                t->ftrace_timestamp = 0;
-       } else
-               t->ret_stack = NULL;
+               /* make curr_ret_stack visible before we add the ret_stack */
+               smp_wmb();
+               t->ret_stack = ret_stack;
+       }
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)
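
The reordering in ftrace_graph_init_task() (and the matching hunk in alloc_retstack_tasklist() further up) is the usual publish pattern: initialize everything, issue smp_wmb(), then store the pointer that makes the object reachable. A fragment sketching the pairing; the reader side is inferred from the comment's intent rather than copied from the tracer:

	/* writer: fully initialize before publishing */
	t->curr_ret_stack = -1;
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	smp_wmb();			/* order the init before the pointer store */
	t->ret_stack = ret_stack;	/* publish: the tracer may use it from now on */

	/* reader (function-entry hook), sketched: */
	if (!t->ret_stack)
		return;			/* not published yet, nothing to trace */
	/* only past this check are curr_ret_stack and the stack entries used */
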