Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index af9d95c..37ba67e 100644
 #include <linux/list.h>
 #include <linux/hash.h>
 
+#include <trace/events/sched.h>
+
 #include <asm/ftrace.h>
+#include <asm/setup.h>
 
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)                   \
        do {                                    \
@@ -66,7 +70,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-       .func = ftrace_stub,
+       .func           = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -218,12 +222,14 @@ static void ftrace_update_pid_func(void)
 {
        ftrace_func_t func;
 
-       mutex_lock(&ftrace_lock);
-
        if (ftrace_trace_function == ftrace_stub)
-               goto out;
+               return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        func = ftrace_trace_function;
+#else
+       func = __ftrace_trace_function;
+#endif
 
        if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
@@ -238,11 +244,583 @@ static void ftrace_update_pid_func(void)
 #else
        __ftrace_trace_function = func;
 #endif
+}
+
+#ifdef CONFIG_FUNCTION_PROFILER
+struct ftrace_profile {
+       struct hlist_node               node;
+       unsigned long                   ip;
+       unsigned long                   counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       unsigned long long              time;
+#endif
+};
+
+struct ftrace_profile_page {
+       struct ftrace_profile_page      *next;
+       unsigned long                   index;
+       struct ftrace_profile           records[];
+};
+
+struct ftrace_profile_stat {
+       atomic_t                        disabled;
+       struct hlist_head               *hash;
+       struct ftrace_profile_page      *pages;
+       struct ftrace_profile_page      *start;
+       struct tracer_stat              stat;
+};
+
+#define PROFILE_RECORDS_SIZE                                           \
+       (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
+
+#define PROFILES_PER_PAGE                                      \
+       (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
+
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
+
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
+static DEFINE_MUTEX(ftrace_profile_lock);
+
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
+
+#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+
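
A quick sanity check on the layout above: each profiling page is one zeroed page whose tail is carved into ftrace_profile records. A minimal userspace sketch of the PROFILES_PER_PAGE arithmetic (the 4K page size, the 64-bit field sizes, and the hlist_node stub are assumptions of this example):

#include <stdio.h>
#include <stddef.h>

/* stand-in for the kernel's struct hlist_node, sized as on 64-bit */
struct hlist_node_stub { void *next, *pprev; };

struct ftrace_profile {
        struct hlist_node_stub  node;
        unsigned long           ip;
        unsigned long           counter;
        unsigned long long      time;   /* CONFIG_FUNCTION_GRAPH_TRACER only */
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

int main(void)
{
        unsigned long page_size = 4096; /* assumed */
        size_t hdr = offsetof(struct ftrace_profile_page, records);

        /* with these sizes: (4096 - 16) / 48 = 85 records per page */
        printf("%zu profiles per page\n",
               (page_size - hdr) / sizeof(struct ftrace_profile));
        return 0;
}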
+static void *
+function_stat_next(void *v, int idx)
+{
+       struct ftrace_profile *rec = v;
+       struct ftrace_profile_page *pg;
+
+       pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
+
+ again:
+       if (idx != 0)
+               rec++;
+
+       if ((void *)rec >= (void *)&pg->records[pg->index]) {
+               pg = pg->next;
+               if (!pg)
+                       return NULL;
+               rec = &pg->records[0];
+               if (!rec->counter)
+                       goto again;
+       }
+
+       return rec;
+}
+
+static void *function_stat_start(struct tracer_stat *trace)
+{
+       struct ftrace_profile_stat *stat =
+               container_of(trace, struct ftrace_profile_stat, stat);
+
+       if (!stat || !stat->start)
+               return NULL;
+
+       return function_stat_next(&stat->start->records[0], 0);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* with function graph, compare on total time */
+static int function_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->time < b->time)
+               return -1;
+       if (a->time > b->time)
+               return 1;
+       else
+               return 0;
+}
+#else
+/* without function graph, compare on hit count */
+static int function_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->counter < b->counter)
+               return -1;
+       if (a->counter > b->counter)
+               return 1;
+       else
+               return 0;
+}
+#endif
+
+static int function_stat_headers(struct seq_file *m)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "  Function                               "
+                  "Hit    Time            Avg\n"
+                     "  --------                               "
+                  "---    ----            ---\n");
+#else
+       seq_printf(m, "  Function                               Hit\n"
+                     "  --------                               ---\n");
+#endif
+       return 0;
+}
+
+static int function_stat_show(struct seq_file *m, void *v)
+{
+       struct ftrace_profile *rec = v;
+       char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       static DEFINE_MUTEX(mutex);
+       static struct trace_seq s;
+       unsigned long long avg;
+#endif
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "    ");
+       avg = rec->time;
+       do_div(avg, rec->counter);
+
+       mutex_lock(&mutex);
+       trace_seq_init(&s);
+       trace_print_graph_duration(rec->time, &s);
+       trace_seq_puts(&s, "    ");
+       trace_print_graph_duration(avg, &s);
+       trace_print_seq(m, &s);
+       mutex_unlock(&mutex);
+#endif
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
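
For orientation, the non-graph format above ("  %-30.30s  %10lu") makes each per-cpu stat file read roughly as below; the function names and hit counts are purely illustrative:

  Function                               Hit
  --------                               ---
  schedule                                    1284
  do_IRQ                                       311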
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
+{
+       struct ftrace_profile_page *pg;
+
+       pg = stat->pages = stat->start;
+
+       while (pg) {
+               memset(pg->records, 0, PROFILE_RECORDS_SIZE);
+               pg->index = 0;
+               pg = pg->next;
+       }
+
+       memset(stat->hash, 0,
+              FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
+}
+
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+{
+       struct ftrace_profile_page *pg;
+       int functions;
+       int pages;
+       int i;
+
+       /* If we already allocated, do nothing */
+       if (stat->pages)
+               return 0;
+
+       stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!stat->pages)
+               return -ENOMEM;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+       functions = ftrace_update_tot_cnt;
+#else
+       /*
+        * We do not know the number of functions that exist because
+        * dynamic tracing is what counts them. From past experience
+        * we know of around 20K functions. That should be more than enough.
+        * It is highly unlikely we will execute every function in
+        * the kernel.
+        */
+       functions = 20000;
+#endif
+
+       pg = stat->start = stat->pages;
+
+       pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+       for (i = 0; i < pages; i++) {
+               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pg->next)
+                       goto out_free;
+               pg = pg->next;
+       }
+
+       return 0;
+
+ out_free:
+       pg = stat->start;
+       while (pg) {
+               unsigned long tmp = (unsigned long)pg;
+
+               pg = pg->next;
+               free_page(tmp);
+       }
+
+       free_page((unsigned long)stat->pages);
+       stat->pages = NULL;
+       stat->start = NULL;
+
+       return -ENOMEM;
+}
+
+static int ftrace_profile_init_cpu(int cpu)
+{
+       struct ftrace_profile_stat *stat;
+       int size;
+
+       stat = &per_cpu(ftrace_profile_stats, cpu);
+
+       if (stat->hash) {
+               /* If the profile is already created, simply reset it */
+               ftrace_profile_reset(stat);
+               return 0;
+       }
+
+       /*
+        * We are profiling all functions, but usually only a few thousand
+        * functions are hit. We'll make a hash of 1024 items.
+        */
+       size = FTRACE_PROFILE_HASH_SIZE;
+
+       stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+
+       if (!stat->hash)
+               return -ENOMEM;
+
+       if (!ftrace_profile_bits) {
+               size--;
+
+               for (; size; size >>= 1)
+                       ftrace_profile_bits++;
+       }
+
+       /* Preallocate the function profiling pages */
+       if (ftrace_profile_pages_init(stat) < 0) {
+               kfree(stat->hash);
+               stat->hash = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
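
The bits computation above runs only once; standalone, the same loop looks like this (a minimal userspace sketch):

#include <stdio.h>

int main(void)
{
        int size = 1024;        /* FTRACE_PROFILE_HASH_SIZE */
        int bits = 0;

        /* identical to the loop in ftrace_profile_init_cpu(): how many
         * bits must hash_long() produce to index size buckets? */
        for (size--; size; size >>= 1)
                bits++;

        printf("%d\n", bits);   /* prints 10 for 1024 buckets */
        return 0;
}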
+
+static int ftrace_profile_init(void)
+{
+       int cpu;
+       int ret = 0;
+
+       for_each_online_cpu(cpu) {
+               ret = ftrace_profile_init_cpu(cpu);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* interrupts must be disabled */
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+       struct ftrace_profile *rec;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
+
+       key = hash_long(ip, ftrace_profile_bits);
+       hhd = &stat->hash[key];
+
+       if (hlist_empty(hhd))
+               return NULL;
+
+       hlist_for_each_entry_rcu(rec, n, hhd, node) {
+               if (rec->ip == ip)
+                       return rec;
+       }
+
+       return NULL;
+}
+
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+                              struct ftrace_profile *rec)
+{
+       unsigned long key;
+
+       key = hash_long(rec->ip, ftrace_profile_bits);
+       hlist_add_head_rcu(&rec->node, &stat->hash[key]);
+}
+
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
+static struct ftrace_profile *
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+       struct ftrace_profile *rec = NULL;
+
+       /* prevent recursion (from NMIs) */
+       if (atomic_inc_return(&stat->disabled) != 1)
+               goto out;
+
+       /*
+        * Try to find the function again since an NMI
+        * could have added it
+        */
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (rec)
+               goto out;
+
+       if (stat->pages->index == PROFILES_PER_PAGE) {
+               if (!stat->pages->next)
+                       goto out;
+               stat->pages = stat->pages->next;
+       }
+
+       rec = &stat->pages->records[stat->pages->index++];
+       rec->ip = ip;
+       ftrace_add_profile(stat, rec);
 
  out:
-       mutex_unlock(&ftrace_lock);
+       atomic_dec(&stat->disabled);
+
+       return rec;
+}
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_profile_stat *stat;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       if (!ftrace_profile_enabled)
+               return;
+
+       local_irq_save(flags);
+
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash || !ftrace_profile_enabled)
+               goto out;
+
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (!rec) {
+               rec = ftrace_profile_alloc(stat, ip);
+               if (!rec)
+                       goto out;
+       }
+
+       rec->counter++;
+ out:
+       local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+       function_profile_call(trace->func, 0);
+       return 1;
 }
 
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct ftrace_profile_stat *stat;
+       unsigned long long calltime;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash || !ftrace_profile_enabled)
+               goto out;
+
+       calltime = trace->rettime - trace->calltime;
+
+       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+               int index;
+
+               index = trace->depth;
+
+               /* Add this call time to the parent's subtime for later subtraction */
+               if (index)
+                       current->ret_stack[index - 1].subtime += calltime;
+
+               if (current->ret_stack[index].subtime < calltime)
+                       calltime -= current->ret_stack[index].subtime;
+               else
+                       calltime = 0;
+       }
+
+       rec = ftrace_find_profiled_func(stat, trace->func);
+       if (rec)
+               rec->time += calltime;
+
+ out:
+       local_irq_restore(flags);
+}
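
Worked through: with TRACE_ITER_GRAPH_TIME cleared, each function is charged only its own time. If foo() runs for 100 us total and its child bar() accounts for 60 us, bar's return handler fires first and adds 60 us to ret_stack[depth - 1].subtime, i.e. to foo's frame; when foo() itself returns, calltime becomes 100 - 60 = 40 us before being added to foo's record.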
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_graph(&profile_graph_return,
+                                    &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_graph();
+}
+#else
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+       .func           = function_profile_call,
+};
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       char buf[64];           /* big enough to hold a number */
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       val = !!val;
+
+       mutex_lock(&ftrace_profile_lock);
+       if (ftrace_profile_enabled ^ val) {
+               if (val) {
+                       ret = ftrace_profile_init();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+
+                       ret = register_ftrace_profiler();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+                       ftrace_profile_enabled = 1;
+               } else {
+                       ftrace_profile_enabled = 0;
+                       /*
+                        * unregister_ftrace_profiler calls stop_machine
+                        * so this acts like a synchronize_sched.
+                        */
+                       unregister_ftrace_profiler();
+               }
+       }
+ out:
+       mutex_unlock(&ftrace_profile_lock);
+
+       filp->f_pos += cnt;
+
+       return cnt;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       char buf[64];           /* big enough to hold a number */
+       int r;
+
+       r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ftrace_profile_fops = {
+       .open           = tracing_open_generic,
+       .read           = ftrace_profile_read,
+       .write          = ftrace_profile_write,
+};
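
A minimal userspace sketch of driving this interface (the /sys/kernel/debug mount point is an assumption of this example):

#include <stdio.h>

int main(void)
{
        FILE *f;
        char line[256];

        /* the write is parsed by ftrace_profile_write() above */
        f = fopen("/sys/kernel/debug/tracing/function_profile_enabled", "w");
        if (!f)
                return 1;
        fputs("1\n", f);
        fclose(f);

        /* per-cpu stat files registered below show up under
         * trace_stat/ as function0, function1, ... */
        f = fopen("/sys/kernel/debug/tracing/trace_stat/function0", "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}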
+
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+       .name           = "functions",
+       .stat_start     = function_stat_start,
+       .stat_next      = function_stat_next,
+       .stat_cmp       = function_stat_cmp,
+       .stat_headers   = function_stat_headers,
+       .stat_show      = function_stat_show
+};
+
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+       struct ftrace_profile_stat *stat;
+       struct dentry *entry;
+       char *name;
+       int ret;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               stat = &per_cpu(ftrace_profile_stats, cpu);
+
+               /* allocate enough for function name + cpu number */
+               name = kmalloc(32, GFP_KERNEL);
+               if (!name) {
+                       /*
+                        * The files created are permanent; if something
+                        * fails, we still do not free the memory.
+                        */
+                       WARN(1,
+                            "Could not allocate stat file for cpu %d\n",
+                            cpu);
+                       return;
+               }
+               stat->stat = function_stats;
+               snprintf(name, 32, "function%d", cpu);
+               stat->stat.name = name;
+               ret = register_stat_tracer(&stat->stat);
+               if (ret) {
+                       WARN(1,
+                            "Could not register function stat for cpu %d\n",
+                            cpu);
+                       kfree(name);
+                       return;
+               }
+       }
+
+       entry = debugfs_create_file("function_profile_enabled", 0644,
+                                   d_tracer, NULL, &ftrace_profile_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'function_profile_enabled' entry\n");
+}
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
 /* set when tracing only a pid */
 struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
@@ -255,16 +833,15 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
 
-struct ftrace_func_hook {
+struct ftrace_func_probe {
        struct hlist_node       node;
-       struct ftrace_hook_ops  *ops;
+       struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
 };
 
-
 enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
@@ -277,7 +854,7 @@ enum {
 
 static int ftrace_filtered;
 
-static LIST_HEAD(ftrace_new_addrs);
+static struct dyn_ftrace *ftrace_new_addrs;
 
 static DEFINE_MUTEX(ftrace_regex_lock);
 
@@ -344,29 +921,11 @@ static inline int record_frozen(struct dyn_ftrace *rec)
 
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
-       rec->ip = (unsigned long)ftrace_free_records;
+       rec->freelist = ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
 }
 
-void ftrace_release(void *start, unsigned long size)
-{
-       struct dyn_ftrace *rec;
-       struct ftrace_page *pg;
-       unsigned long s = (unsigned long)start;
-       unsigned long e = s + size;
-
-       if (ftrace_disabled || !start)
-               return;
-
-       mutex_lock(&ftrace_lock);
-       do_for_each_ftrace_rec(pg, rec) {
-               if ((rec->ip >= s) && (rec->ip < e))
-                       ftrace_free_rec(rec);
-       } while_for_each_ftrace_rec();
-       mutex_unlock(&ftrace_lock);
-}
-
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
        struct dyn_ftrace *rec;
@@ -381,7 +940,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
                        return NULL;
                }
 
-               ftrace_free_records = (void *)rec->ip;
+               ftrace_free_records = rec->freelist;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }
@@ -413,8 +972,8 @@ ftrace_record_ip(unsigned long ip)
                return NULL;
 
        rec->ip = ip;
-
-       list_add(&rec->list, &ftrace_new_addrs);
+       rec->newlist = ftrace_new_addrs;
+       ftrace_new_addrs = rec;
 
        return rec;
 }
@@ -461,71 +1020,35 @@ static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
        unsigned long ftrace_addr;
-       unsigned long ip, fl;
+       unsigned long flag = 0UL;
 
        ftrace_addr = (unsigned long)FTRACE_ADDR;
 
-       ip = rec->ip;
-
        /*
-        * If this record is not to be traced and
-        * it is not enabled then do nothing.
+        * If this record is not to be traced or we want to disable it,
+        * then disable it.
         *
-        * If this record is not to be traced and
-        * it is enabled then disable it.
+        * If we want to enable it and filtering is off, then enable it.
         *
+        * If we want to enable it and filtering is on, enable it only if
+        * it's filtered.
         */
-       if (rec->flags & FTRACE_FL_NOTRACE) {
-               if (rec->flags & FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-                       return 0;
-
-       } else if (ftrace_filtered && enable) {
-               /*
-                * Filtering is on:
-                */
-
-               fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
-               /* Record is filtered and enabled, do nothing */
-               if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
-                       return 0;
-
-               /* Record is not filtered or enabled, do nothing */
-               if (!fl)
-                       return 0;
-
-               /* Record is not filtered but enabled, disable it */
-               if (fl == FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-               /* Otherwise record is filtered but not enabled, enable it */
-                       rec->flags |= FTRACE_FL_ENABLED;
-       } else {
-               /* Disable or not filtered */
-
-               if (enable) {
-                       /* if record is enabled, do nothing */
-                       if (rec->flags & FTRACE_FL_ENABLED)
-                               return 0;
-
-                       rec->flags |= FTRACE_FL_ENABLED;
-
-               } else {
+       if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+               if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+                       flag = FTRACE_FL_ENABLED;
+       }
 
-                       /* if record is not enabled, do nothing */
-                       if (!(rec->flags & FTRACE_FL_ENABLED))
-                               return 0;
+       /* If the state of this record hasn't changed, then do nothing */
+       if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+               return 0;
 
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               }
+       if (flag) {
+               rec->flags |= FTRACE_FL_ENABLED;
+               return ftrace_make_call(rec, ftrace_addr);
        }
 
-       if (rec->flags & FTRACE_FL_ENABLED)
-               return ftrace_make_call(rec, ftrace_addr);
-       else
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+       rec->flags &= ~FTRACE_FL_ENABLED;
+       return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
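
Spelled out, the rewrite first computes the desired state (flag) and patches only when it differs from the record's current FTRACE_FL_ENABLED bit. As a table derived from the code above:

enable  NOTRACE  filtering         desired    action when state differs
------  -------  ----------------  --------   -------------------------
0       -        -                 disabled   ftrace_make_nop()
1       set      -                 disabled   ftrace_make_nop()
1       clear    off               enabled    ftrace_make_call()
1       clear    on, FILTER set    enabled    ftrace_make_call()
1       clear    on, FILTER clear  disabled   ftrace_make_nop()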
 
 static void ftrace_replace_code(int enable)
@@ -536,11 +1059,12 @@ static void ftrace_replace_code(int enable)
 
        do_for_each_ftrace_rec(pg, rec) {
                /*
-                * Skip over free records and records that have
-                * failed.
+                * Skip over free records, records that have failed,
+                * and records not yet converted.
                 */
                if (rec->flags & FTRACE_FL_FREE ||
-                   rec->flags & FTRACE_FL_FAILED)
+                   rec->flags & FTRACE_FL_FAILED ||
+                   !(rec->flags & FTRACE_FL_CONVERTED))
                        continue;
 
                /* ignore updates to this record's mcount site */
@@ -552,13 +1076,11 @@ static void ftrace_replace_code(int enable)
                }
 
                failed = __ftrace_replace_code(rec, enable);
-               if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
+               if (failed) {
                        rec->flags |= FTRACE_FL_FAILED;
-                       if ((system_state == SYSTEM_BOOTING) ||
-                           !core_kernel_text(rec->ip)) {
-                               ftrace_free_rec(rec);
-                       } else
-                               ftrace_bug(failed, rec->ip);
+                       ftrace_bug(failed, rec->ip);
+                       /* Stop processing */
+                       return;
                }
        } while_for_each_ftrace_rec();
 }
@@ -580,6 +1102,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
        return 1;
 }
 
+/*
+ * archs can override this function if they must do something
+ * before the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+       return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+       return 0;
+}
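
Architectures that must unprotect kernel text before the stop_machine() patching pass override these weak stubs; x86's version from this era was essentially the following (shown for illustration):

/* arch/x86/kernel/ftrace.c, same era -- illustrative */
int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();           /* allow patching the text */
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_kernel_text_ro();           /* restore write protection */
        return 0;
}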
+
 static int __ftrace_modify_code(void *data)
 {
        int *command = data;
@@ -602,7 +1142,17 @@ static int __ftrace_modify_code(void *data)
 
 static void ftrace_run_update_code(int command)
 {
+       int ret;
+
+       ret = ftrace_arch_code_modify_prepare();
+       FTRACE_WARN_ON(ret);
+       if (ret)
+               return;
+
        stop_machine(__ftrace_modify_code, &command, NULL);
+
+       ret = ftrace_arch_code_modify_post_process();
+       FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
@@ -638,6 +1188,13 @@ static void ftrace_shutdown(int command)
                return;
 
        ftrace_start_up--;
+       /*
+        * Just warn in case of imbalance; no need to kill ftrace. It's not
+        * critical, but the ftrace_call callers may never be nopped again
+        * after further ftrace uses.
+        */
+       WARN_ON_ONCE(ftrace_start_up < 0);
+
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
@@ -688,19 +1245,21 @@ unsigned long            ftrace_update_tot_cnt;
 
 static int ftrace_update_code(struct module *mod)
 {
-       struct dyn_ftrace *p, *t;
+       struct dyn_ftrace *p;
        cycle_t start, stop;
 
        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;
 
-       list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
+       while (ftrace_new_addrs) {
 
                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;
 
-               list_del_init(&p->list);
+               p = ftrace_new_addrs;
+               ftrace_new_addrs = p->newlist;
+               p->flags = 0L;
 
                /* convert record (i.e, patch mcount-call with NOP) */
                if (ftrace_code_disable(mod, p)) {
@@ -763,11 +1322,10 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 enum {
        FTRACE_ITER_FILTER      = (1 << 0),
-       FTRACE_ITER_CONT        = (1 << 1),
-       FTRACE_ITER_NOTRACE     = (1 << 2),
-       FTRACE_ITER_FAILURES    = (1 << 3),
-       FTRACE_ITER_PRINTALL    = (1 << 4),
-       FTRACE_ITER_HASH        = (1 << 5),
+       FTRACE_ITER_NOTRACE     = (1 << 1),
+       FTRACE_ITER_FAILURES    = (1 << 2),
+       FTRACE_ITER_PRINTALL    = (1 << 3),
+       FTRACE_ITER_HASH        = (1 << 4),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
@@ -777,9 +1335,7 @@ struct ftrace_iterator {
        int                     hidx;
        int                     idx;
        unsigned                flags;
-       unsigned char           buffer[FTRACE_BUFF_MAX+1];
-       unsigned                buffer_idx;
-       unsigned                filtered;
+       struct trace_parser     parser;
 };
 
 static void *
@@ -822,28 +1378,33 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
+
+       if (!(iter->flags & FTRACE_ITER_HASH))
+               *pos = 0;
 
        iter->flags |= FTRACE_ITER_HASH;
 
-       return t_hash_next(m, p, pos);
+       iter->hidx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_hash_next(m, p, &l);
+               if (!p)
+                       break;
+       }
+       return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
 {
-       struct ftrace_func_hook *rec;
+       struct ftrace_func_probe *rec;
        struct hlist_node *hnd = v;
-       char str[KSYM_SYMBOL_LEN];
 
-       rec = hlist_entry(hnd, struct ftrace_func_hook, node);
+       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
 
        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
 
-       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-       seq_printf(m, "%s:", str);
-
-       kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
-       seq_printf(m, "%s", str);
+       seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
 
        if (rec->data)
                seq_printf(m, ":%p", rec->data);
@@ -872,8 +1433,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
-               } else {
-                       iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
@@ -902,6 +1461,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
 
        mutex_lock(&ftrace_lock);
        /*
@@ -913,23 +1473,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
-               (*pos)++;
                return iter;
        }
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);
 
-       if (*pos > 0) {
-               if (iter->idx < 0)
-                       return p;
-               (*pos)--;
-               iter->idx--;
+       iter->pg = ftrace_pages_start;
+       iter->idx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_next(m, p, &l);
+               if (!p)
+                       break;
        }
 
-       p = t_next(m, p, pos);
-
-       if (!p)
+       if (!p && iter->flags & FTRACE_ITER_FILTER)
                return t_hash_start(m, pos);
 
        return p;
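
The rewritten t_start()/t_hash_start() follow the standard seq_file contract: ->start() may be called with an arbitrary *pos (for instance after the output buffer filled mid-walk), so they rewind and replay ->next() instead of trusting cached iterator state. A minimal sketch of that pattern outside the kernel (types illustrative):

struct node { struct node *next; };

static void *demo_next(struct node *head, void *v, long *pos)
{
        struct node *n = v ? ((struct node *)v)->next : head;

        (*pos)++;
        return n;
}

static void *demo_start(struct node *head, long *pos)
{
        void *p = NULL;
        long l;

        /* rewind and replay next() until *pos is reached, exactly as
         * t_start() does with t_next() above */
        for (l = 0; l <= *pos; ) {
                p = demo_next(head, p, &l);
                if (!p)
                        break;
        }
        return p;
}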
@@ -944,7 +1502,6 @@ static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
-       char str[KSYM_SYMBOL_LEN];
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_show(m, v);
@@ -957,14 +1514,12 @@ static int t_show(struct seq_file *m, void *v)
        if (!rec)
                return 0;
 
-       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
-       seq_printf(m, "%s\n", str);
+       seq_printf(m, "%ps\n", (void *)rec->ip);
 
        return 0;
 }
 
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
@@ -998,17 +1553,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
        return ret;
 }
 
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = (struct seq_file *)file->private_data;
-       struct ftrace_iterator *iter = m->private;
-
-       seq_release(inode, file);
-       kfree(iter);
-
-       return 0;
-}
-
 static int
 ftrace_failures_open(struct inode *inode, struct file *file)
 {
@@ -1057,9 +1601,14 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
        if (!iter)
                return -ENOMEM;
 
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
+               kfree(iter);
+               return -ENOMEM;
+       }
+
        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND))
+           (file->f_flags & O_TRUNC))
                ftrace_filter_reset(enable);
 
        if (file->f_mode & FMODE_READ) {
@@ -1071,8 +1620,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
-               } else
+               } else {
+                       trace_parser_put(&iter->parser);
                        kfree(iter);
+               }
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);
@@ -1092,16 +1643,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
        return ftrace_regex_open(inode, file, 0);
 }
 
-static ssize_t
-ftrace_regex_read(struct file *file, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
-{
-       if (file->f_mode & FMODE_READ)
-               return seq_read(file, ubuf, cnt, ppos);
-       else
-               return -EPERM;
-}
-
 static loff_t
 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
 {
@@ -1351,9 +1892,9 @@ static int __init ftrace_mod_cmd_init(void)
 device_initcall(ftrace_mod_cmd_init);
 
 static void
-function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 {
-       struct ftrace_func_hook *entry;
+       struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;
@@ -1379,18 +1920,18 @@ function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
        ftrace_preempt_enable(resched);
 }
 
-static struct ftrace_ops trace_hook_ops __read_mostly =
+static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-       .func = function_trace_hook_call,
+       .func           = function_trace_probe_call,
 };
 
-static int ftrace_hook_registered;
+static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_hook(void)
+static void __enable_ftrace_function_probe(void)
 {
        int i;
 
-       if (ftrace_hook_registered)
+       if (ftrace_probe_registered)
                return;
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
@@ -1402,16 +1943,16 @@ static void __enable_ftrace_function_hook(void)
        if (i == FTRACE_FUNC_HASHSIZE)
                return;
 
-       __register_ftrace_function(&trace_hook_ops);
+       __register_ftrace_function(&trace_probe_ops);
        ftrace_startup(0);
-       ftrace_hook_registered = 1;
+       ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_hook(void)
+static void __disable_ftrace_function_probe(void)
 {
        int i;
 
-       if (!ftrace_hook_registered)
+       if (!ftrace_probe_registered)
                return;
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
@@ -1421,16 +1962,16 @@ static void __disable_ftrace_function_hook(void)
        }
 
        /* no more funcs left */
-       __unregister_ftrace_function(&trace_hook_ops);
+       __unregister_ftrace_function(&trace_probe_ops);
        ftrace_shutdown(0);
-       ftrace_hook_registered = 0;
+       ftrace_probe_registered = 0;
 }
 
 
 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
 {
-       struct ftrace_func_hook *entry =
-               container_of(rhp, struct ftrace_func_hook, rcu);
+       struct ftrace_func_probe *entry =
+               container_of(rhp, struct ftrace_func_probe, rcu);
 
        if (entry->ops->free)
                entry->ops->free(&entry->data);
@@ -1439,10 +1980,10 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
 
 
 int
-register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
-       struct ftrace_func_hook *entry;
+       struct ftrace_func_probe *entry;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type, len, not;
@@ -1453,7 +1994,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
        type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
        len = strlen(search);
 
-       /* we do not support '!' for function hooks */
+       /* we do not support '!' for function probes */
        if (WARN_ON(not))
                return -EINVAL;
 
@@ -1468,7 +2009,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
-                       /* If we did not hook to any, then return error */
+                       /* If we did not process any, then return error */
                        if (!count)
                                count = -ENOMEM;
                        goto out_unlock;
@@ -1498,7 +2039,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
 
        } while_for_each_ftrace_rec();
-       __enable_ftrace_function_hook();
+       __enable_ftrace_function_probe();
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
@@ -1507,30 +2048,30 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 }
 
 enum {
-       HOOK_TEST_FUNC          = 1,
-       HOOK_TEST_DATA          = 2
+       PROBE_TEST_FUNC         = 1,
+       PROBE_TEST_DATA         = 2
 };
 
 static void
-__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
 {
-       struct ftrace_func_hook *entry;
+       struct ftrace_func_probe *entry;
        struct hlist_node *n, *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
        int i, len = 0;
        char *search;
 
-       if (glob && (strcmp(glob, "*") || !strlen(glob)))
+       if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
                glob = NULL;
-       else {
+       else if (glob) {
                int not;
 
                type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
                len = strlen(search);
 
-               /* we do not support '!' for function hooks */
+               /* we do not support '!' for function probes */
                if (WARN_ON(not))
                        return;
        }
@@ -1542,10 +2083,10 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
 
                        /* break up if statements for readability */
-                       if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
+                       if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
                                continue;
 
-                       if ((flags & HOOK_TEST_DATA) && entry->data != data)
+                       if ((flags & PROBE_TEST_DATA) && entry->data != data)
                                continue;
 
                        /* do this last, since it is the most expensive */
@@ -1560,27 +2101,27 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                        call_rcu(&entry->rcu, ftrace_free_entry_rcu);
                }
        }
-       __disable_ftrace_function_hook();
+       __disable_ftrace_function_probe();
        mutex_unlock(&ftrace_lock);
 }
 
 void
-unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                void *data)
 {
-       __unregister_ftrace_function_hook(glob, ops, data,
-                                         HOOK_TEST_FUNC | HOOK_TEST_DATA);
+       __unregister_ftrace_function_probe(glob, ops, data,
+                                         PROBE_TEST_FUNC | PROBE_TEST_DATA);
 }
 
 void
-unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
 {
-       __unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
+       __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
 }
 
-void unregister_ftrace_function_hook_all(char *glob)
+void unregister_ftrace_function_probe_all(char *glob)
 {
-       __unregister_ftrace_function_hook(glob, NULL, NULL, 0);
+       __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
 }
 
 static LIST_HEAD(ftrace_commands);
@@ -1659,11 +2200,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
 {
        struct ftrace_iterator *iter;
-       char ch;
-       size_t read = 0;
-       ssize_t ret;
+       struct trace_parser *parser;
+       ssize_t ret, read;
 
-       if (!cnt || cnt < 0)
+       if (!cnt)
                return 0;
 
        mutex_lock(&ftrace_regex_lock);
@@ -1674,68 +2214,23 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        } else
                iter = file->private_data;
 
-       if (!*ppos) {
-               iter->flags &= ~FTRACE_ITER_CONT;
-               iter->buffer_idx = 0;
-       }
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
-               goto out;
-       read++;
-       cnt--;
-
-       if (!(iter->flags & ~FTRACE_ITER_CONT)) {
-               /* skip white space */
-               while (cnt && isspace(ch)) {
-                       ret = get_user(ch, ubuf++);
-                       if (ret)
-                               goto out;
-                       read++;
-                       cnt--;
-               }
-
-               if (isspace(ch)) {
-                       file->f_pos += read;
-                       ret = read;
-                       goto out;
-               }
-
-               iter->buffer_idx = 0;
-       }
-
-       while (cnt && !isspace(ch)) {
-               if (iter->buffer_idx < FTRACE_BUFF_MAX)
-                       iter->buffer[iter->buffer_idx++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
-               if (ret)
-                       goto out;
-               read++;
-               cnt--;
-       }
+       parser = &iter->parser;
+       read = trace_get_user(parser, ubuf, cnt, ppos);
 
-       if (isspace(ch)) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ret = ftrace_process_regex(iter->buffer,
-                                          iter->buffer_idx, enable);
+       if (read >= 0 && trace_parser_loaded(parser) &&
+           !trace_parser_cont(parser)) {
+               ret = ftrace_process_regex(parser->buffer,
+                                          parser->idx, enable);
                if (ret)
                        goto out;
-               iter->buffer_idx = 0;
-       } else
-               iter->flags |= FTRACE_ITER_CONT;
-
 
-       file->f_pos += read;
+               trace_parser_clear(parser);
+       }
 
        ret = read;
- out:
-       mutex_unlock(&ftrace_regex_lock);
 
+       mutex_unlock(&ftrace_regex_lock);
+out:
        return ret;
 }
 
@@ -1796,11 +2291,51 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
        ftrace_set_regex(buf, len, reset, 0);
 }
 
+/*
+ * Command line interface to allow users to set filters at boot time.
+ */
+#define FTRACE_FILTER_SIZE             COMMAND_LINE_SIZE
+static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+
+static int __init set_ftrace_notrace(char *str)
+{
+       strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_notrace=", set_ftrace_notrace);
+
+static int __init set_ftrace_filter(char *str)
+{
+       strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_filter=", set_ftrace_filter);
+
+static void __init set_ftrace_early_filter(char *buf, int enable)
+{
+       char *func;
+
+       while (buf) {
+               func = strsep(&buf, ",");
+               ftrace_set_regex(func, strlen(func), 0, enable);
+       }
+}
+
+static void __init set_ftrace_early_filters(void)
+{
+       if (ftrace_filter_buf[0])
+               set_ftrace_early_filter(ftrace_filter_buf, 1);
+       if (ftrace_notrace_buf[0])
+               set_ftrace_early_filter(ftrace_notrace_buf, 0);
+}
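
With these hooks in place, filters take effect before any tracer runs, straight from the kernel command line; strsep() above splits the comma-separated list. For example (the function names and the glob are purely illustrative):

ftrace_filter=kmalloc,kfree ftrace_notrace=*rcu*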
+
 static int
 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;
+       struct trace_parser *parser;
 
        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
@@ -1810,10 +2345,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
        } else
                iter = file->private_data;
 
-       if (iter->buffer_idx) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
+       parser = &iter->parser;
+       if (trace_parser_loaded(parser)) {
+               parser->buffer[parser->idx] = 0;
+               ftrace_match_records(parser->buffer, parser->idx, enable);
        }
 
        mutex_lock(&ftrace_lock);
@@ -1821,7 +2356,9 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_lock);
 
+       trace_parser_put(parser);
        kfree(iter);
+
        mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
@@ -1838,31 +2375,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
        return ftrace_regex_release(inode, file, 0);
 }
 
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
-       .read = ftrace_regex_read,
+       .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
 };
 
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
-       .read = ftrace_regex_read,
+       .read = seq_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
@@ -1876,28 +2413,29 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
-       unsigned long *array = m->private;
-       int index = *pos;
-
-       (*pos)++;
-
-       if (index >= ftrace_graph_count)
+       if (*pos >= ftrace_graph_count)
                return NULL;
+       return &ftrace_graph_funcs[*pos];
+}
 
-       return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-       void *p = NULL;
-
        mutex_lock(&graph_lock);
 
-       p = g_next(m, p, pos);
+       /* Nothing set; tell g_show to print that all functions are enabled */
+       if (!ftrace_graph_count && !*pos)
+               return (void *)1;
 
-       return p;
+       return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
@@ -1908,19 +2446,21 @@ static void g_stop(struct seq_file *m, void *p)
 static int g_show(struct seq_file *m, void *v)
 {
        unsigned long *ptr = v;
-       char str[KSYM_SYMBOL_LEN];
 
        if (!ptr)
                return 0;
 
-       kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+       if (ptr == (unsigned long *)1) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
+       }
 
-       seq_printf(m, "%s\n", str);
+       seq_printf(m, "%ps\n", (void *)*ptr);
 
        return 0;
 }
 
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
        .start = g_start,
        .next = g_next,
        .stop = g_stop,
@@ -1937,67 +2477,72 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND)) {
+           (file->f_flags & O_TRUNC)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }
+       mutex_unlock(&graph_lock);
 
-       if (file->f_mode & FMODE_READ) {
+       if (file->f_mode & FMODE_READ)
                ret = seq_open(file, &ftrace_graph_seq_ops);
-               if (!ret) {
-                       struct seq_file *m = file->private_data;
-                       m->private = ftrace_graph_funcs;
-               }
-       } else
-               file->private_data = ftrace_graph_funcs;
-       mutex_unlock(&graph_lock);
 
        return ret;
 }
 
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
+static int
+ftrace_graph_release(struct inode *inode, struct file *file)
 {
        if (file->f_mode & FMODE_READ)
-               return seq_read(file, ubuf, cnt, ppos);
-       else
-               return -EPERM;
+               seq_release(inode, file);
+       return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-       char str[KSYM_SYMBOL_LEN];
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int search_len;
        int found = 0;
-       int j;
+       int type, not;
+       char *search;
+       bool exists;
+       int i;
 
        if (ftrace_disabled)
                return -ENODEV;
 
+       /* decode regex */
+       type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+       if (not)
+               return -EINVAL;
+
+       search_len = strlen(search);
+
        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
 
+               if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                       break;
+
                if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
                        continue;
 
-               kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-               if (strcmp(str, buffer) == 0) {
-                       /* Return 1 if we add it to the array */
-                       found = 1;
-                       for (j = 0; j < idx; j++)
-                               if (array[j] == rec->ip) {
-                                       found = 0;
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       /* ensure it is not already in the array */
+                       exists = false;
+                       for (i = 0; i < *idx; i++)
+                               if (array[i] == rec->ip) {
+                                       exists = true;
                                        break;
                                }
-                       if (found)
-                               array[idx] = rec->ip;
-                       goto out;
+                       if (!exists) {
+                               array[(*idx)++] = rec->ip;
+                               found = 1;
+                       }
                }
        } while_for_each_ftrace_rec();
- out:
+
        mutex_unlock(&ftrace_lock);
 
        return found ? 0 : -EINVAL;
@@ -2007,12 +2552,8 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
-       unsigned char buffer[FTRACE_BUFF_MAX+1];
-       unsigned long *array;
-       size_t read = 0;
-       ssize_t ret;
-       int index = 0;
-       char ch;
+       struct trace_parser parser;
+       ssize_t read, ret;
 
        if (!cnt || cnt < 0)
                return 0;
@@ -2021,108 +2562,63 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 
        if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
                ret = -EBUSY;
-               goto out;
+               goto out_unlock;
        }
 
-       if (file->f_mode & FMODE_READ) {
-               struct seq_file *m = file->private_data;
-               array = m->private;
-       } else
-               array = file->private_data;
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
-               goto out;
-       read++;
-       cnt--;
-
-       /* skip white space */
-       while (cnt && isspace(ch)) {
-               ret = get_user(ch, ubuf++);
-               if (ret)
-                       goto out;
-               read++;
-               cnt--;
+       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
+               ret = -ENOMEM;
+               goto out_unlock;
        }
 
-       if (isspace(ch)) {
-               *ppos += read;
-               ret = read;
-               goto out;
-       }
+       read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-       while (cnt && !isspace(ch)) {
-               if (index < FTRACE_BUFF_MAX)
-                       buffer[index++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
+       if (read >= 0 && trace_parser_loaded((&parser))) {
+               parser.buffer[parser.idx] = 0;
+
+               /* we allow only one expression at a time */
+               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+                                       parser.buffer);
                if (ret)
-                       goto out;
-               read++;
-               cnt--;
+                       goto out_free;
        }
-       buffer[index] = 0;
-
-       /* we allow only one at a time */
-       ret = ftrace_set_func(array, ftrace_graph_count, buffer);
-       if (ret)
-               goto out;
-
-       ftrace_graph_count++;
-
-       file->f_pos += read;
 
        ret = read;
- out:
+
+out_free:
+       trace_parser_put(&parser);
+out_unlock:
        mutex_unlock(&graph_lock);
 
        return ret;
 }
 
 static const struct file_operations ftrace_graph_fops = {
-       .open = ftrace_graph_open,
-       .read = ftrace_graph_read,
-       .write = ftrace_graph_write,
+       .open           = ftrace_graph_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *entry;
 
-       entry = debugfs_create_file("available_filter_functions", 0444,
-                                   d_tracer, NULL, &ftrace_avail_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_filter_functions' entry\n");
+       trace_create_file("available_filter_functions", 0444,
+                       d_tracer, NULL, &ftrace_avail_fops);
 
-       entry = debugfs_create_file("failures", 0444,
-                                   d_tracer, NULL, &ftrace_failures_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'failures' entry\n");
+       trace_create_file("failures", 0444,
+                       d_tracer, NULL, &ftrace_failures_fops);
 
-       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-                                   NULL, &ftrace_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_filter' entry\n");
+       trace_create_file("set_ftrace_filter", 0644, d_tracer,
+                       NULL, &ftrace_filter_fops);
 
-       entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_notrace' entry\n");
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -2160,14 +2656,68 @@ static int ftrace_convert_nops(struct module *mod,
        return 0;
 }
 
-void ftrace_init_module(struct module *mod,
-                       unsigned long *start, unsigned long *end)
+#ifdef CONFIG_MODULES
+void ftrace_release_mod(struct module *mod)
+{
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+
+       if (ftrace_disabled)
+               return;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               if (within_module_core(rec->ip, mod)) {
+                       /*
+                        * rec->ip is changed in ftrace_free_rec(); it
+                        * should not be within the module's address
+                        * range if the record was freed.
+                        */
+                       FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+                       ftrace_free_rec(rec);
+               }
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+static void ftrace_init_module(struct module *mod,
+                              unsigned long *start, unsigned long *end)
 {
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(mod, start, end);
 }
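+/*
+ * Module notifier: on MODULE_STATE_COMING convert the module's mcount
+ * call sites into ftrace records; on MODULE_STATE_GOING free those
+ * records again via ftrace_release_mod().
+ */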
 
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ftrace_init_module(mod, mod->ftrace_callsites,
+                                  mod->ftrace_callsites +
+                                  mod->num_ftrace_callsites);
+               break;
+       case MODULE_STATE_GOING:
+               ftrace_release_mod(mod);
+               break;
+       }
+
+       return 0;
+}
+#else
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       return 0;
+}
+#endif /* CONFIG_MODULES */
+
+struct notifier_block ftrace_module_nb = {
+       .notifier_call = ftrace_module_notify,
+       .priority = 0,
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -2199,6 +2749,12 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
+       ret = register_module_notifier(&ftrace_module_nb);
+       if (ret)
+               pr_warning("Failed to register ftrace module notifier\n");
+
+       set_ftrace_early_filters();
+
        return;
  failed:
        ftrace_disabled = 1;
@@ -2232,7 +2788,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
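+       /* pid_vnr() reports the pid as seen from the reader's pid namespace */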
        if (ftrace_pid_trace == ftrace_swapper_pid)
                r = sprintf(buf, "swapper tasks\n");
        else if (ftrace_pid_trace)
-               r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+               r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
        else
                r = sprintf(buf, "no pid\n");
 
@@ -2372,7 +2928,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
 };
@@ -2380,7 +2936,6 @@ static struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *entry;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -2388,11 +2943,11 @@ static __init int ftrace_init_debugfs(void)
 
        ftrace_init_dyn_debugfs(d_tracer);
 
-       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-                                   NULL, &ftrace_pid_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_pid' entry\n");
+       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+                           NULL, &ftrace_pid_fops);
+
+       ftrace_profile_debugfs(d_tracer);
+
        return 0;
 }
 fs_initcall(ftrace_init_debugfs);
@@ -2458,7 +3013,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-                    struct file *file, void __user *buffer, size_t *lenp,
+                    void __user *buffer, size_t *lenp,
                     loff_t *ppos)
 {
        int ret;
@@ -2468,12 +3023,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        mutex_lock(&ftrace_lock);
 
-       ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
+       ret  = proc_dointvec(table, write, buffer, lenp, ppos);
 
-       if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
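+       /*
+        * Compare as booleans: the sysctl can be written with any
+        * non-zero value, so normalize with !! before checking for a
+        * real on/off transition.
+        */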
+       if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;
 
-       last_ftrace_enabled = ftrace_enabled;
+       last_ftrace_enabled = !!ftrace_enabled;
 
        if (ftrace_enabled) {
 
@@ -2501,7 +3056,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
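+/* ftrace_graph_active is only modified under ftrace_lock; no atomic needed */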
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -2543,12 +3098,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
-                       t->curr_ret_stack = -1;
-                       /* Make sure IRQs see the -1 first: */
-                       barrier();
-                       t->ret_stack = ret_stack_list[start++];
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
+                       t->curr_ret_stack = -1;
+                       /* Make sure the tasks see the -1 first: */
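+                       /*
+                        * (a task that observes ret_stack != NULL must
+                        * also observe curr_ret_stack == -1)
+                        */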
+                       smp_wmb();
+                       t->ret_stack = ret_stack_list[start++];
                }
        } while_each_thread(g, t);
 
@@ -2560,11 +3115,43 @@ free:
        return ret;
 }
 
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+                               struct task_struct *next)
+{
+       unsigned long long timestamp;
+       int index;
+
+       /*
+        * Does the user want to count the time a function was asleep?
+        * If so, do not update the time stamps.
+        */
+       if (trace_flags & TRACE_ITER_SLEEP_TIME)
+               return;
+
+       timestamp = trace_clock_local();
+
+       prev->ftrace_timestamp = timestamp;
+
+       /* only process tasks that we timestamped */
+       if (!next->ftrace_timestamp)
+               return;
+
+       /*
+        * Update all the counters in next to make up for the
+        * time next was sleeping.
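+        * E.g. if next slept for 5us, every call still pending on its
+        * return stack gets 5us added to calltime, so the sleep is not
+        * charged to those functions' durations.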
+        */
+       timestamp -= next->ftrace_timestamp;
+
+       for (index = next->curr_ret_stack; index >= 0; index--)
+               next->ret_stack[index].calltime += timestamp;
+}
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
        struct ftrace_ret_stack **ret_stack_list;
-       int ret;
+       int ret, cpu;
 
        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                sizeof(struct ftrace_ret_stack *),
@@ -2573,10 +3160,23 @@ static int start_graph_tracing(void)
        if (!ret_stack_list)
                return -ENOMEM;
 
+       /* The per-cpu idle task's ret_stack will never be freed */
+       for_each_online_cpu(cpu) {
+               if (!idle_task(cpu)->ret_stack)
+                       ftrace_graph_init_task(idle_task(cpu));
+       }
+
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);
 
+       if (!ret) {
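+               /*
+                * Hook sched_switch so ftrace_graph_probe_sched_switch()
+                * can discount the time tasks spend switched out.
+                */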
+               ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+               if (ret)
+                       pr_info("ftrace_graph: Couldn't activate tracepoint"
+                               " probe to kernel_sched_switch\n");
+       }
+
        kfree(ret_stack_list);
        return ret;
 }
@@ -2609,13 +3209,19 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 
        mutex_lock(&ftrace_lock);
 
+       /* we currently allow only one tracer registered at a time */
+       if (ftrace_graph_active) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
-       atomic_inc(&ftrace_graph_active);
+       ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_graph_active);
+               ftrace_graph_active--;
                goto out;
        }
 
@@ -2633,29 +3239,42 @@ void unregister_ftrace_graph(void)
 {
        mutex_lock(&ftrace_lock);
 
-       atomic_dec(&ftrace_graph_active);
+       if (unlikely(!ftrace_graph_active))
+               goto out;
+
+       ftrace_graph_active--;
+       unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
 
+ out:
        mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_graph_active)) {
-               t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+       /* Make sure we do not use the parent ret_stack */
+       t->ret_stack = NULL;
+
+       if (ftrace_graph_active) {
+               struct ftrace_ret_stack *ret_stack;
+
+               ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
-               if (!t->ret_stack)
+               if (!ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
-       } else
-               t->ret_stack = NULL;
+               t->ftrace_timestamp = 0;
+               /* make curr_ret_stack visible before we add the ret_stack */
+               smp_wmb();
+               t->ret_stack = ret_stack;
+       }
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)