sysctl: remove "struct file *" argument of ->proc_handler
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2971fe4..a142579 100644
@@ -17,6 +17,7 @@
 #include <linux/clocksource.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/suspend.h>
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
+#include <linux/hash.h>
+
+#include <trace/events/sched.h>
 
 #include <asm/ftrace.h>
+#include <asm/setup.h>
 
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)                   \
        do {                                    \
                        ftrace_kill();          \
        } while (0)
 
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -60,13 +66,11 @@ int function_trace_stop;
  */
 static int ftrace_disabled __read_mostly;
 
-static DEFINE_SPINLOCK(ftrace_lock);
-static DEFINE_MUTEX(ftrace_sysctl_lock);
-static DEFINE_MUTEX(ftrace_start_lock);
+static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-       .func = ftrace_stub,
+       .func           = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -133,9 +137,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
@@ -171,18 +172,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 #endif
        }
 
-       spin_unlock(&ftrace_lock);
-
        return 0;
 }
 
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        struct ftrace_ops **p;
-       int ret = 0;
-
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
 
        /*
         * If we are removing the last function, then simply point
@@ -191,17 +186,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
-               goto out;
+               return 0;
        }
 
        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;
 
-       if (*p != ops) {
-               ret = -1;
-               goto out;
-       }
+       if (*p != ops)
+               return -1;
 
        *p = (*p)->next;
 
@@ -222,21 +215,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                }
        }
 
- out:
-       spin_unlock(&ftrace_lock);
-
-       return ret;
+       return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
        ftrace_func_t func;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-
        if (ftrace_trace_function == ftrace_stub)
-               goto out;
+               return;
 
        func = ftrace_trace_function;
 
@@ -253,859 +240,1958 @@ static void ftrace_update_pid_func(void)
 #else
        __ftrace_trace_function = func;
 #endif
-
- out:
-       spin_unlock(&ftrace_lock);
 }
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-#ifndef CONFIG_FTRACE_MCOUNT_RECORD
-# error Dynamic ftrace depends on MCOUNT_RECORD
+#ifdef CONFIG_FUNCTION_PROFILER
+struct ftrace_profile {
+       struct hlist_node               node;
+       unsigned long                   ip;
+       unsigned long                   counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       unsigned long long              time;
 #endif
-
-/*
- * Since MCOUNT_ADDR may point to mcount itself, we do not want
- * to get it confused by reading a reference in the code as we
- * are parsing on objcopy output of text. Use a variable for
- * it instead.
- */
-static unsigned long mcount_addr = MCOUNT_ADDR;
-
-enum {
-       FTRACE_ENABLE_CALLS             = (1 << 0),
-       FTRACE_DISABLE_CALLS            = (1 << 1),
-       FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
-       FTRACE_ENABLE_MCOUNT            = (1 << 3),
-       FTRACE_DISABLE_MCOUNT           = (1 << 4),
-       FTRACE_START_FUNC_RET           = (1 << 5),
-       FTRACE_STOP_FUNC_RET            = (1 << 6),
 };
 
-static int ftrace_filtered;
+struct ftrace_profile_page {
+       struct ftrace_profile_page      *next;
+       unsigned long                   index;
+       struct ftrace_profile           records[];
+};
 
-static LIST_HEAD(ftrace_new_addrs);
+struct ftrace_profile_stat {
+       atomic_t                        disabled;
+       struct hlist_head               *hash;
+       struct ftrace_profile_page      *pages;
+       struct ftrace_profile_page      *start;
+       struct tracer_stat              stat;
+};
 
-static DEFINE_MUTEX(ftrace_regex_lock);
+#define PROFILE_RECORDS_SIZE                                           \
+       (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 
-struct ftrace_page {
-       struct ftrace_page      *next;
-       unsigned long           index;
-       struct dyn_ftrace       records[];
-};
+#define PROFILES_PER_PAGE                                      \
+       (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
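
Back-of-envelope for the two macros above, assuming 4K pages and a 64-bit
build with the graph tracer off (struct ftrace_profile is then an hlist_node
plus ip and counter, 32 bytes, and the page header is 16 bytes):

	PROFILE_RECORDS_SIZE = 4096 - 16 = 4080
	PROFILES_PER_PAGE    = 4080 / 32 = 127 records per page
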
 
-#define ENTRIES_PER_PAGE \
-  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
 
-/* estimate from running different kernels */
-#define NR_TO_INIT             10000
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
+static DEFINE_MUTEX(ftrace_profile_lock);
 
-static struct ftrace_page      *ftrace_pages_start;
-static struct ftrace_page      *ftrace_pages;
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 
-static struct dyn_ftrace *ftrace_free_records;
+#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
 
+static void *
+function_stat_next(void *v, int idx)
+{
+       struct ftrace_profile *rec = v;
+       struct ftrace_profile_page *pg;
 
-#ifdef CONFIG_KPROBES
+       pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
-static int frozen_record_count;
+ again:
+       if (idx != 0)
+               rec++;
 
-static inline void freeze_record(struct dyn_ftrace *rec)
-{
-       if (!(rec->flags & FTRACE_FL_FROZEN)) {
-               rec->flags |= FTRACE_FL_FROZEN;
-               frozen_record_count++;
+       if ((void *)rec >= (void *)&pg->records[pg->index]) {
+               pg = pg->next;
+               if (!pg)
+                       return NULL;
+               rec = &pg->records[0];
+               if (!rec->counter)
+                       goto again;
        }
+
+       return rec;
 }
 
-static inline void unfreeze_record(struct dyn_ftrace *rec)
+static void *function_stat_start(struct tracer_stat *trace)
 {
-       if (rec->flags & FTRACE_FL_FROZEN) {
-               rec->flags &= ~FTRACE_FL_FROZEN;
-               frozen_record_count--;
-       }
+       struct ftrace_profile_stat *stat =
+               container_of(trace, struct ftrace_profile_stat, stat);
+
+       if (!stat || !stat->start)
+               return NULL;
+
+       return function_stat_next(&stat->start->records[0], 0);
 }
 
-static inline int record_frozen(struct dyn_ftrace *rec)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* function graph compares on total time */
+static int function_stat_cmp(void *p1, void *p2)
 {
-       return rec->flags & FTRACE_FL_FROZEN;
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->time < b->time)
+               return -1;
+       if (a->time > b->time)
+               return 1;
+       else
+               return 0;
 }
 #else
-# define freeze_record(rec)                    ({ 0; })
-# define unfreeze_record(rec)                  ({ 0; })
-# define record_frozen(rec)                    ({ 0; })
-#endif /* CONFIG_KPROBES */
-
-static void ftrace_free_rec(struct dyn_ftrace *rec)
+/* without function graph, compare on hit counts */
+static int function_stat_cmp(void *p1, void *p2)
 {
-       rec->ip = (unsigned long)ftrace_free_records;
-       ftrace_free_records = rec;
-       rec->flags |= FTRACE_FL_FREE;
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->counter < b->counter)
+               return -1;
+       if (a->counter > b->counter)
+               return 1;
+       else
+               return 0;
 }
+#endif
 
-void ftrace_release(void *start, unsigned long size)
+static int function_stat_headers(struct seq_file *m)
 {
-       struct dyn_ftrace *rec;
-       struct ftrace_page *pg;
-       unsigned long s = (unsigned long)start;
-       unsigned long e = s + size;
-       int i;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "  Function                               "
+                  "Hit    Time            Avg\n"
+                     "  --------                               "
+                  "---    ----            ---\n");
+#else
+       seq_printf(m, "  Function                               Hit\n"
+                     "  --------                               ---\n");
+#endif
+       return 0;
+}
 
-       if (ftrace_disabled || !start)
-               return;
+static int function_stat_show(struct seq_file *m, void *v)
+{
+       struct ftrace_profile *rec = v;
+       char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       static DEFINE_MUTEX(mutex);
+       static struct trace_seq s;
+       unsigned long long avg;
+#endif
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "    ");
+       avg = rec->time;
+       do_div(avg, rec->counter);
+
+       mutex_lock(&mutex);
+       trace_seq_init(&s);
+       trace_print_graph_duration(rec->time, &s);
+       trace_seq_puts(&s, "    ");
+       trace_print_graph_duration(avg, &s);
+       trace_print_seq(m, &s);
+       mutex_unlock(&mutex);
+#endif
+       seq_putc(m, '\n');
 
-                       if ((rec->ip >= s) && (rec->ip < e))
-                               ftrace_free_rec(rec);
-               }
-       }
-       spin_unlock(&ftrace_lock);
+       return 0;
 }
 
-static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 {
-       struct dyn_ftrace *rec;
-
-       /* First check for freed records */
-       if (ftrace_free_records) {
-               rec = ftrace_free_records;
-
-               if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
-                       FTRACE_WARN_ON_ONCE(1);
-                       ftrace_free_records = NULL;
-                       return NULL;
-               }
+       struct ftrace_profile_page *pg;
 
-               ftrace_free_records = (void *)rec->ip;
-               memset(rec, 0, sizeof(*rec));
-               return rec;
-       }
+       pg = stat->pages = stat->start;
 
-       if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-               if (!ftrace_pages->next) {
-                       /* allocate another page */
-                       ftrace_pages->next =
-                               (void *)get_zeroed_page(GFP_KERNEL);
-                       if (!ftrace_pages->next)
-                               return NULL;
-               }
-               ftrace_pages = ftrace_pages->next;
+       while (pg) {
+               memset(pg->records, 0, PROFILE_RECORDS_SIZE);
+               pg->index = 0;
+               pg = pg->next;
        }
 
-       return &ftrace_pages->records[ftrace_pages->index++];
+       memset(stat->hash, 0,
+              FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 }
 
-static struct dyn_ftrace *
-ftrace_record_ip(unsigned long ip)
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 {
-       struct dyn_ftrace *rec;
+       struct ftrace_profile_page *pg;
+       int functions;
+       int pages;
+       int i;
 
-       if (ftrace_disabled)
-               return NULL;
+       /* If we already allocated, do nothing */
+       if (stat->pages)
+               return 0;
 
-       rec = ftrace_alloc_dyn_node(ip);
-       if (!rec)
-               return NULL;
+       stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!stat->pages)
+               return -ENOMEM;
 
-       rec->ip = ip;
+#ifdef CONFIG_DYNAMIC_FTRACE
+       functions = ftrace_update_tot_cnt;
+#else
+       /*
+        * We do not know the number of functions that exist because
+        * dynamic tracing is what counts them. From past experience
+        * we have around 20K functions. That should be more than enough.
+        * It is highly unlikely we will execute every function in
+        * the kernel.
+        */
+       functions = 20000;
+#endif
 
-       list_add(&rec->list, &ftrace_new_addrs);
+       pg = stat->start = stat->pages;
 
-       return rec;
-}
+       pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-       int i;
+       for (i = 0; i < pages; i++) {
+               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pg->next)
+                       goto out_free;
+               pg = pg->next;
+       }
 
-       printk(KERN_CONT "%s", fmt);
+       return 0;
 
-       for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
+ out_free:
+       pg = stat->start;
+       while (pg) {
+               unsigned long tmp = (unsigned long)pg;
 
-static void ftrace_bug(int failed, unsigned long ip)
-{
-       switch (failed) {
-       case -EFAULT:
-               FTRACE_WARN_ON_ONCE(1);
-               pr_info("ftrace faulted on modifying ");
-               print_ip_sym(ip);
-               break;
-       case -EINVAL:
-               FTRACE_WARN_ON_ONCE(1);
-               pr_info("ftrace failed to modify ");
-               print_ip_sym(ip);
-               print_ip_ins(" actual: ", (unsigned char *)ip);
-               printk(KERN_CONT "\n");
-               break;
-       case -EPERM:
-               FTRACE_WARN_ON_ONCE(1);
-               pr_info("ftrace faulted on writing ");
-               print_ip_sym(ip);
-               break;
-       default:
-               FTRACE_WARN_ON_ONCE(1);
-               pr_info("ftrace faulted on unknown error ");
-               print_ip_sym(ip);
+               pg = pg->next;
+               free_page(tmp);
        }
-}
 
+       free_page((unsigned long)stat->pages);
+       stat->pages = NULL;
+       stat->start = NULL;
 
-static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+       return -ENOMEM;
+}
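
Rough sizing under the same assumptions (4K pages, ~127 records per page):
the !CONFIG_DYNAMIC_FTRACE estimate of 20000 functions works out to
DIV_ROUND_UP(20000, 127) = 158 pages, i.e. about 630KB of profile records
per CPU.
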
+
+static int ftrace_profile_init_cpu(int cpu)
 {
-       unsigned long ip, fl;
-       unsigned long ftrace_addr;
+       struct ftrace_profile_stat *stat;
+       int size;
 
-       ftrace_addr = (unsigned long)ftrace_caller;
+       stat = &per_cpu(ftrace_profile_stats, cpu);
 
-       ip = rec->ip;
+       if (stat->hash) {
+               /* If the profile is already created, simply reset it */
+               ftrace_profile_reset(stat);
+               return 0;
+       }
 
        /*
-        * If this record is not to be traced and
-        * it is not enabled then do nothing.
-        *
-        * If this record is not to be traced and
-        * it is enabled then disabled it.
-        *
+        * We are profiling all functions, but usually only a few thousand
+        * functions are hit. We'll make a hash of 1024 items.
         */
-       if (rec->flags & FTRACE_FL_NOTRACE) {
-               if (rec->flags & FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-                       return 0;
-
-       } else if (ftrace_filtered && enable) {
-               /*
-                * Filtering is on:
-                */
+       size = FTRACE_PROFILE_HASH_SIZE;
 
-               fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
+       stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
 
-               /* Record is filtered and enabled, do nothing */
-               if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
-                       return 0;
-
-               /* Record is not filtered and is not enabled do nothing */
-               if (!fl)
-                       return 0;
+       if (!stat->hash)
+               return -ENOMEM;
 
-               /* Record is not filtered but enabled, disable it */
-               if (fl == FTRACE_FL_ENABLED)
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               else
-               /* Otherwise record is filtered but not enabled, enable it */
-                       rec->flags |= FTRACE_FL_ENABLED;
-       } else {
-               /* Disable or not filtered */
+       if (!ftrace_profile_bits) {
+               size--;
 
-               if (enable) {
-                       /* if record is enabled, do nothing */
-                       if (rec->flags & FTRACE_FL_ENABLED)
-                               return 0;
+               for (; size; size >>= 1)
+                       ftrace_profile_bits++;
+       }
 
-                       rec->flags |= FTRACE_FL_ENABLED;
+       /* Preallocate the function profiling pages */
+       if (ftrace_profile_pages_init(stat) < 0) {
+               kfree(stat->hash);
+               stat->hash = NULL;
+               return -ENOMEM;
+       }
 
-               } else {
+       return 0;
+}
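
With FTRACE_PROFILE_HASH_SIZE at 1024, the shift loop above computes
ftrace_profile_bits = 10, the bit count hash_long() expects. A standalone
sketch of the same computation, assuming a power-of-two size:

	static int hash_bits(int size)		/* e.g. 1024 -> 10 */
	{
		int bits = 0;

		for (size--; size; size >>= 1)
			bits++;
		return bits;
	}
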
 
-                       /* if record is not enabled do nothing */
-                       if (!(rec->flags & FTRACE_FL_ENABLED))
-                               return 0;
+static int ftrace_profile_init(void)
+{
+       int cpu;
+       int ret = 0;
 
-                       rec->flags &= ~FTRACE_FL_ENABLED;
-               }
+       for_each_online_cpu(cpu) {
+               ret = ftrace_profile_init_cpu(cpu);
+               if (ret)
+                       break;
        }
 
-       if (rec->flags & FTRACE_FL_ENABLED)
-               return ftrace_make_call(rec, ftrace_addr);
-       else
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+       return ret;
 }
 
-static void ftrace_replace_code(int enable)
+/* interrupts must be disabled */
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
-       int i, failed;
-       struct dyn_ftrace *rec;
-       struct ftrace_page *pg;
-
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
+       struct ftrace_profile *rec;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
 
-                       /*
-                        * Skip over free records and records that have
-                        * failed.
-                        */
-                       if (rec->flags & FTRACE_FL_FREE ||
-                           rec->flags & FTRACE_FL_FAILED)
-                               continue;
+       key = hash_long(ip, ftrace_profile_bits);
+       hhd = &stat->hash[key];
 
-                       /* ignore updates to this record's mcount site */
-                       if (get_kprobe((void *)rec->ip)) {
-                               freeze_record(rec);
-                               continue;
-                       } else {
-                               unfreeze_record(rec);
-                       }
+       if (hlist_empty(hhd))
+               return NULL;
 
-                       failed = __ftrace_replace_code(rec, enable);
-                       if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
-                               rec->flags |= FTRACE_FL_FAILED;
-                               if ((system_state == SYSTEM_BOOTING) ||
-                                   !core_kernel_text(rec->ip)) {
-                                       ftrace_free_rec(rec);
-                               } else
-                                       ftrace_bug(failed, rec->ip);
-                       }
-               }
+       hlist_for_each_entry_rcu(rec, n, hhd, node) {
+               if (rec->ip == ip)
+                       return rec;
        }
+
+       return NULL;
 }
 
-static int
-ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+                              struct ftrace_profile *rec)
 {
-       unsigned long ip;
-       int ret;
+       unsigned long key;
 
-       ip = rec->ip;
+       key = hash_long(rec->ip, ftrace_profile_bits);
+       hlist_add_head_rcu(&rec->node, &stat->hash[key]);
+}
 
-       ret = ftrace_make_nop(mod, rec, mcount_addr);
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
+static struct ftrace_profile *
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+       struct ftrace_profile *rec = NULL;
+
+       /* prevent recursion (from NMIs) */
+       if (atomic_inc_return(&stat->disabled) != 1)
+               goto out;
+
+       /*
+        * Try to find the function again since an NMI
+        * could have added it
+        */
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (rec)
+               goto out;
+
+       if (stat->pages->index == PROFILES_PER_PAGE) {
+               if (!stat->pages->next)
+                       goto out;
+               stat->pages = stat->pages->next;
+       }
+
+       rec = &stat->pages->records[stat->pages->index++];
+       rec->ip = ip;
+       ftrace_add_profile(stat, rec);
+
+ out:
+       atomic_dec(&stat->disabled);
+
+       return rec;
+}
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_profile_stat *stat;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       if (!ftrace_profile_enabled)
+               return;
+
+       local_irq_save(flags);
+
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash || !ftrace_profile_enabled)
+               goto out;
+
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (!rec) {
+               rec = ftrace_profile_alloc(stat, ip);
+               if (!rec)
+                       goto out;
+       }
+
+       rec->counter++;
+ out:
+       local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+       function_profile_call(trace->func, 0);
+       return 1;
+}
+
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct ftrace_profile_stat *stat;
+       unsigned long long calltime;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash || !ftrace_profile_enabled)
+               goto out;
+
+       calltime = trace->rettime - trace->calltime;
+
+       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+               int index;
+
+               index = trace->depth;
+
+               /* Append this call time to the parent time to subtract */
+               if (index)
+                       current->ret_stack[index - 1].subtime += calltime;
+
+               if (current->ret_stack[index].subtime < calltime)
+                       calltime -= current->ret_stack[index].subtime;
+               else
+                       calltime = 0;
+       }
+
+       rec = ftrace_find_profiled_func(stat, trace->func);
+       if (rec)
+               rec->time += calltime;
+
+ out:
+       local_irq_restore(flags);
+}
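
A worked example of the subtime bookkeeping above, with TRACE_ITER_GRAPH_TIME
off: if foo() sits on the stack for 10us and its child bar() for 4us, then
bar's return adds 4us to foo's ret_stack subtime, so foo's return records
only 10 - 4 = 6us of self time.
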
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_graph(&profile_graph_return,
+                                    &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_graph();
+}
+#else
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+       .func           = function_profile_call,
+};
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       char buf[64];           /* big enough to hold a number */
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       val = !!val;
+
+       mutex_lock(&ftrace_profile_lock);
+       if (ftrace_profile_enabled ^ val) {
+               if (val) {
+                       ret = ftrace_profile_init();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+
+                       ret = register_ftrace_profiler();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+                       ftrace_profile_enabled = 1;
+               } else {
+                       ftrace_profile_enabled = 0;
+                       /*
+                        * unregister_ftrace_profiler calls stop_machine
+                        * so this acts like a synchronize_sched.
+                        */
+                       unregister_ftrace_profiler();
+               }
+       }
+ out:
+       mutex_unlock(&ftrace_profile_lock);
+
+       filp->f_pos += cnt;
+
+       return cnt;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       char buf[64];           /* big enough to hold a number */
+       int r;
+
+       r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ftrace_profile_fops = {
+       .open           = tracing_open_generic,
+       .read           = ftrace_profile_read,
+       .write          = ftrace_profile_write,
+};
+
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+       .name           = "functions",
+       .stat_start     = function_stat_start,
+       .stat_next      = function_stat_next,
+       .stat_cmp       = function_stat_cmp,
+       .stat_headers   = function_stat_headers,
+       .stat_show      = function_stat_show
+};
+
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+       struct ftrace_profile_stat *stat;
+       struct dentry *entry;
+       char *name;
+       int ret;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               stat = &per_cpu(ftrace_profile_stats, cpu);
+
+               /* allocate enough for function name + cpu number */
+               name = kmalloc(32, GFP_KERNEL);
+               if (!name) {
+                       /*
+                        * The files created are permanent; if something goes
+                        * wrong we still do not free the memory.
+                        */
+                       WARN(1,
+                            "Could not allocate stat file for cpu %d\n",
+                            cpu);
+                       return;
+               }
+               stat->stat = function_stats;
+               snprintf(name, 32, "function%d", cpu);
+               stat->stat.name = name;
+               ret = register_stat_tracer(&stat->stat);
+               if (ret) {
+                       WARN(1,
+                            "Could not register function stat for cpu %d\n",
+                            cpu);
+                       kfree(name);
+                       return;
+               }
+       }
+
+       entry = debugfs_create_file("function_profile_enabled", 0644,
+                                   d_tracer, NULL, &ftrace_profile_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'function_profile_enabled' entry\n");
+}
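
Typical usage, assuming debugfs is mounted at the usual location:

	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	# cat /sys/kernel/debug/tracing/trace_stat/function0
	# echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
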
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifndef CONFIG_FTRACE_MCOUNT_RECORD
+# error Dynamic ftrace depends on MCOUNT_RECORD
+#endif
+
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_probe {
+       struct hlist_node       node;
+       struct ftrace_probe_ops *ops;
+       unsigned long           flags;
+       unsigned long           ip;
+       void                    *data;
+       struct rcu_head         rcu;
+};
+
+enum {
+       FTRACE_ENABLE_CALLS             = (1 << 0),
+       FTRACE_DISABLE_CALLS            = (1 << 1),
+       FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
+       FTRACE_ENABLE_MCOUNT            = (1 << 3),
+       FTRACE_DISABLE_MCOUNT           = (1 << 4),
+       FTRACE_START_FUNC_RET           = (1 << 5),
+       FTRACE_STOP_FUNC_RET            = (1 << 6),
+};
+
+static int ftrace_filtered;
+
+static struct dyn_ftrace *ftrace_new_addrs;
+
+static DEFINE_MUTEX(ftrace_regex_lock);
+
+struct ftrace_page {
+       struct ftrace_page      *next;
+       int                     index;
+       struct dyn_ftrace       records[];
+};
+
+#define ENTRIES_PER_PAGE \
+  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+
+/* estimate from running different kernels */
+#define NR_TO_INIT             10000
+
+static struct ftrace_page      *ftrace_pages_start;
+static struct ftrace_page      *ftrace_pages;
+
+static struct dyn_ftrace *ftrace_free_records;
+
+/*
+ * This is a double for loop. Do not use 'break' to break out of the
+ * loop; you must use a goto.
+ */
+#define do_for_each_ftrace_rec(pg, rec)                                        \
+       for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
+               int _____i;                                             \
+               for (_____i = 0; _____i < pg->index; _____i++) {        \
+                       rec = &pg->records[_____i];
+
+#define while_for_each_ftrace_rec()            \
+               }                               \
+       }
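
A sketch of how the pair reads at a call site (the real users appear further
down), e.g. counting enabled records:

	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int enabled = 0;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_ENABLED)
			enabled++;
	} while_for_each_ftrace_rec();
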
+
+#ifdef CONFIG_KPROBES
+
+static int frozen_record_count;
+
+static inline void freeze_record(struct dyn_ftrace *rec)
+{
+       if (!(rec->flags & FTRACE_FL_FROZEN)) {
+               rec->flags |= FTRACE_FL_FROZEN;
+               frozen_record_count++;
+       }
+}
+
+static inline void unfreeze_record(struct dyn_ftrace *rec)
+{
+       if (rec->flags & FTRACE_FL_FROZEN) {
+               rec->flags &= ~FTRACE_FL_FROZEN;
+               frozen_record_count--;
+       }
+}
+
+static inline int record_frozen(struct dyn_ftrace *rec)
+{
+       return rec->flags & FTRACE_FL_FROZEN;
+}
+#else
+# define freeze_record(rec)                    ({ 0; })
+# define unfreeze_record(rec)                  ({ 0; })
+# define record_frozen(rec)                    ({ 0; })
+#endif /* CONFIG_KPROBES */
+
+static void ftrace_free_rec(struct dyn_ftrace *rec)
+{
+       rec->freelist = ftrace_free_records;
+       ftrace_free_records = rec;
+       rec->flags |= FTRACE_FL_FREE;
+}
+
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+{
+       struct dyn_ftrace *rec;
+
+       /* First check for freed records */
+       if (ftrace_free_records) {
+               rec = ftrace_free_records;
+
+               if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
+                       FTRACE_WARN_ON_ONCE(1);
+                       ftrace_free_records = NULL;
+                       return NULL;
+               }
+
+               ftrace_free_records = rec->freelist;
+               memset(rec, 0, sizeof(*rec));
+               return rec;
+       }
+
+       if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+               if (!ftrace_pages->next) {
+                       /* allocate another page */
+                       ftrace_pages->next =
+                               (void *)get_zeroed_page(GFP_KERNEL);
+                       if (!ftrace_pages->next)
+                               return NULL;
+               }
+               ftrace_pages = ftrace_pages->next;
+       }
+
+       return &ftrace_pages->records[ftrace_pages->index++];
+}
+
+static struct dyn_ftrace *
+ftrace_record_ip(unsigned long ip)
+{
+       struct dyn_ftrace *rec;
+
+       if (ftrace_disabled)
+               return NULL;
+
+       rec = ftrace_alloc_dyn_node(ip);
+       if (!rec)
+               return NULL;
+
+       rec->ip = ip;
+       rec->newlist = ftrace_new_addrs;
+       ftrace_new_addrs = rec;
+
+       return rec;
+}
+
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+       int i;
+
+       printk(KERN_CONT "%s", fmt);
+
+       for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+       switch (failed) {
+       case -EFAULT:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on modifying ");
+               print_ip_sym(ip);
+               break;
+       case -EINVAL:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace failed to modify ");
+               print_ip_sym(ip);
+               print_ip_ins(" actual: ", (unsigned char *)ip);
+               printk(KERN_CONT "\n");
+               break;
+       case -EPERM:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on writing ");
+               print_ip_sym(ip);
+               break;
+       default:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on unknown error ");
+               print_ip_sym(ip);
+       }
+}
+
+
+static int
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+{
+       unsigned long ftrace_addr;
+       unsigned long flag = 0UL;
+
+       ftrace_addr = (unsigned long)FTRACE_ADDR;
+
+       /*
+        * If this record is not to be traced or we want to disable it,
+        * then disable it.
+        *
+        * If we want to enable it and filtering is off, then enable it.
+        *
+        * If we want to enable it and filtering is on, enable it only if
+        * it's filtered
+        */
+       if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+               if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+                       flag = FTRACE_FL_ENABLED;
+       }
+
+       /* If the state of this record hasn't changed, then do nothing */
+       if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+               return 0;
+
+       if (flag) {
+               rec->flags |= FTRACE_FL_ENABLED;
+               return ftrace_make_call(rec, ftrace_addr);
+       }
+
+       rec->flags &= ~FTRACE_FL_ENABLED;
+       return ftrace_make_nop(NULL, rec, ftrace_addr);
+}
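
The branch above collapses to a small decision table ('flag' being the
desired FTRACE_FL_ENABLED state):

	enable  NOTRACE  ftrace_filtered  FILTER      flag
	  0        x            x           x          0
	  1        1            x           x          0
	  1        0            0           x        ENABLED
	  1        0            1           0          0
	  1        0            1           1        ENABLED
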
+
+static void ftrace_replace_code(int enable)
+{
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+       int failed;
+
+       do_for_each_ftrace_rec(pg, rec) {
+               /*
+                * Skip over free records, records that have failed,
+                * and records that have not been converted.
+                */
+               if (rec->flags & FTRACE_FL_FREE ||
+                   rec->flags & FTRACE_FL_FAILED ||
+                   !(rec->flags & FTRACE_FL_CONVERTED))
+                       continue;
+
+               /* ignore updates to this record's mcount site */
+               if (get_kprobe((void *)rec->ip)) {
+                       freeze_record(rec);
+                       continue;
+               } else {
+                       unfreeze_record(rec);
+               }
+
+               failed = __ftrace_replace_code(rec, enable);
+               if (failed) {
+                       rec->flags |= FTRACE_FL_FAILED;
+                       if ((system_state == SYSTEM_BOOTING) ||
+                           !core_kernel_text(rec->ip)) {
+                               ftrace_free_rec(rec);
+                       } else {
+                               ftrace_bug(failed, rec->ip);
+                               /* Stop processing */
+                               return;
+                       }
+               }
+       } while_for_each_ftrace_rec();
+}
+
+static int
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+{
+       unsigned long ip;
+       int ret;
+
+       ip = rec->ip;
+
+       ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
                ftrace_bug(ret, ip);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
-       return 1;
+       return 1;
+}
+
+/*
+ * archs can override this function if they must do something
+ * before the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+       return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+       return 0;
+}
+
+static int __ftrace_modify_code(void *data)
+{
+       int *command = data;
+
+       if (*command & FTRACE_ENABLE_CALLS)
+               ftrace_replace_code(1);
+       else if (*command & FTRACE_DISABLE_CALLS)
+               ftrace_replace_code(0);
+
+       if (*command & FTRACE_UPDATE_TRACE_FUNC)
+               ftrace_update_ftrace_func(ftrace_trace_function);
+
+       if (*command & FTRACE_START_FUNC_RET)
+               ftrace_enable_ftrace_graph_caller();
+       else if (*command & FTRACE_STOP_FUNC_RET)
+               ftrace_disable_ftrace_graph_caller();
+
+       return 0;
+}
+
+static void ftrace_run_update_code(int command)
+{
+       int ret;
+
+       ret = ftrace_arch_code_modify_prepare();
+       FTRACE_WARN_ON(ret);
+       if (ret)
+               return;
+
+       stop_machine(__ftrace_modify_code, &command, NULL);
+
+       ret = ftrace_arch_code_modify_post_process();
+       FTRACE_WARN_ON(ret);
+}
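
stop_machine() runs __ftrace_modify_code() while every other CPU is held in a
known state, so the mcount call sites can be rewritten without another
processor executing an instruction mid-patch. The __weak prepare/post hooks
bracket that window for archs that need it (x86, for instance, uses them to
make kernel text writable and read-only again).
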
+
+static ftrace_func_t saved_ftrace_func;
+static int ftrace_start_up;
+
+static void ftrace_startup_enable(int command)
+{
+       if (saved_ftrace_func != ftrace_trace_function) {
+               saved_ftrace_func = ftrace_trace_function;
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+
+       if (!command || !ftrace_enabled)
+               return;
+
+       ftrace_run_update_code(command);
+}
+
+static void ftrace_startup(int command)
+{
+       if (unlikely(ftrace_disabled))
+               return;
+
+       ftrace_start_up++;
+       command |= FTRACE_ENABLE_CALLS;
+
+       ftrace_startup_enable(command);
+}
+
+static void ftrace_shutdown(int command)
+{
+       if (unlikely(ftrace_disabled))
+               return;
+
+       ftrace_start_up--;
+       /*
+        * Just warn in case of imbalance; no need to kill ftrace, it's not
+        * critical. But the ftrace_call callers may never be nopped again
+        * after further ftrace uses.
+        */
+       WARN_ON_ONCE(ftrace_start_up < 0);
+
+       if (!ftrace_start_up)
+               command |= FTRACE_DISABLE_CALLS;
+
+       if (saved_ftrace_func != ftrace_trace_function) {
+               saved_ftrace_func = ftrace_trace_function;
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+
+       if (!command || !ftrace_enabled)
+               return;
+
+       ftrace_run_update_code(command);
+}
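
The ftrace_start_up counter makes these two calls strictly pairwise; a sketch
of the expected sequence, assuming ftrace_enabled is set:

	ftrace_startup(0);	/* 0 -> 1: FTRACE_ENABLE_CALLS issued  */
	ftrace_startup(0);	/* 1 -> 2: call sites already patched  */
	ftrace_shutdown(0);	/* 2 -> 1: tracing stays on            */
	ftrace_shutdown(0);	/* 1 -> 0: FTRACE_DISABLE_CALLS issued */
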
+
+static void ftrace_startup_sysctl(void)
+{
+       int command = FTRACE_ENABLE_MCOUNT;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       /* Force update next time */
+       saved_ftrace_func = NULL;
+       /* ftrace_start_up is true if we want ftrace running */
+       if (ftrace_start_up)
+               command |= FTRACE_ENABLE_CALLS;
+
+       ftrace_run_update_code(command);
+}
+
+static void ftrace_shutdown_sysctl(void)
+{
+       int command = FTRACE_DISABLE_MCOUNT;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       /* ftrace_start_up is true if ftrace is running */
+       if (ftrace_start_up)
+               command |= FTRACE_DISABLE_CALLS;
+
+       ftrace_run_update_code(command);
+}
+
+static cycle_t         ftrace_update_time;
+static unsigned long   ftrace_update_cnt;
+unsigned long          ftrace_update_tot_cnt;
+
+static int ftrace_update_code(struct module *mod)
+{
+       struct dyn_ftrace *p;
+       cycle_t start, stop;
+
+       start = ftrace_now(raw_smp_processor_id());
+       ftrace_update_cnt = 0;
+
+       while (ftrace_new_addrs) {
+
+               /* If something went wrong, bail without enabling anything */
+               if (unlikely(ftrace_disabled))
+                       return -1;
+
+               p = ftrace_new_addrs;
+               ftrace_new_addrs = p->newlist;
+               p->flags = 0L;
+
+               /* convert record (i.e, patch mcount-call with NOP) */
+               if (ftrace_code_disable(mod, p)) {
+                       p->flags |= FTRACE_FL_CONVERTED;
+                       ftrace_update_cnt++;
+               } else
+                       ftrace_free_rec(p);
+       }
+
+       stop = ftrace_now(raw_smp_processor_id());
+       ftrace_update_time = stop - start;
+       ftrace_update_tot_cnt += ftrace_update_cnt;
+
+       return 0;
+}
+
+static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
+{
+       struct ftrace_page *pg;
+       int cnt;
+       int i;
+
+       /* allocate a few pages */
+       ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!ftrace_pages_start)
+               return -1;
+
+       /*
+        * Allocate a few more pages.
+        *
+        * TODO: have some parser search vmlinux before
+        *   final linking to find all calls to ftrace.
+        *   Then we can:
+        *    a) know how many pages to allocate.
+        *     and/or
+        *    b) set up the table then.
+        *
+        *  The dynamic code is still necessary for
+        *  modules.
+        */
+
+       pg = ftrace_pages = ftrace_pages_start;
+
+       cnt = num_to_init / ENTRIES_PER_PAGE;
+       pr_info("ftrace: allocating %ld entries in %d pages\n",
+               num_to_init, cnt + 1);
+
+       for (i = 0; i < cnt; i++) {
+               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+               /* If we fail, we'll try later anyway */
+               if (!pg->next)
+                       break;
+
+               pg = pg->next;
+       }
+
+       return 0;
+}
+
+enum {
+       FTRACE_ITER_FILTER      = (1 << 0),
+       FTRACE_ITER_NOTRACE     = (1 << 1),
+       FTRACE_ITER_FAILURES    = (1 << 2),
+       FTRACE_ITER_PRINTALL    = (1 << 3),
+       FTRACE_ITER_HASH        = (1 << 4),
+};
+
+#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
+
+struct ftrace_iterator {
+       struct ftrace_page      *pg;
+       int                     hidx;
+       int                     idx;
+       unsigned                flags;
+       struct trace_parser     parser;
+};
+
+static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       struct hlist_node *hnd = v;
+       struct hlist_head *hhd;
+
+       WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+       (*pos)++;
+
+ retry:
+       if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+               return NULL;
+
+       hhd = &ftrace_func_hash[iter->hidx];
+
+       if (hlist_empty(hhd)) {
+               iter->hidx++;
+               hnd = NULL;
+               goto retry;
+       }
+
+       if (!hnd)
+               hnd = hhd->first;
+       else {
+               hnd = hnd->next;
+               if (!hnd) {
+                       iter->hidx++;
+                       goto retry;
+               }
+       }
+
+       return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       void *p = NULL;
+       loff_t l;
+
+       if (!(iter->flags & FTRACE_ITER_HASH))
+               *pos = 0;
+
+       iter->flags |= FTRACE_ITER_HASH;
+
+       iter->hidx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_hash_next(m, p, &l);
+               if (!p)
+                       break;
+       }
+       return p;
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+       struct ftrace_func_probe *rec;
+       struct hlist_node *hnd = v;
+
+       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+       if (rec->ops->print)
+               return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
+       seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
+
+       if (rec->data)
+               seq_printf(m, ":%p", rec->data);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       struct dyn_ftrace *rec = NULL;
+
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_next(m, v, pos);
+
+       (*pos)++;
+
+       if (iter->flags & FTRACE_ITER_PRINTALL)
+               return NULL;
+
+ retry:
+       if (iter->idx >= iter->pg->index) {
+               if (iter->pg->next) {
+                       iter->pg = iter->pg->next;
+                       iter->idx = 0;
+                       goto retry;
+               }
+       } else {
+               rec = &iter->pg->records[iter->idx++];
+               if ((rec->flags & FTRACE_FL_FREE) ||
+
+                   (!(iter->flags & FTRACE_ITER_FAILURES) &&
+                    (rec->flags & FTRACE_FL_FAILED)) ||
+
+                   ((iter->flags & FTRACE_ITER_FAILURES) &&
+                    !(rec->flags & FTRACE_FL_FAILED)) ||
+
+                   ((iter->flags & FTRACE_ITER_FILTER) &&
+                    !(rec->flags & FTRACE_FL_FILTER)) ||
+
+                   ((iter->flags & FTRACE_ITER_NOTRACE) &&
+                    !(rec->flags & FTRACE_FL_NOTRACE))) {
+                       rec = NULL;
+                       goto retry;
+               }
+       }
+
+       return rec;
 }
 
-static int __ftrace_modify_code(void *data)
+static void *t_start(struct seq_file *m, loff_t *pos)
 {
-       int *command = data;
+       struct ftrace_iterator *iter = m->private;
+       void *p = NULL;
+       loff_t l;
 
-       if (*command & FTRACE_ENABLE_CALLS)
-               ftrace_replace_code(1);
-       else if (*command & FTRACE_DISABLE_CALLS)
-               ftrace_replace_code(0);
+       mutex_lock(&ftrace_lock);
+       /*
+        * For set_ftrace_filter reading, if we have the filter
+        * off, we can short cut and just print out that all
+        * functions are enabled.
+        */
+       if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+               if (*pos > 0)
+                       return t_hash_start(m, pos);
+               iter->flags |= FTRACE_ITER_PRINTALL;
+               return iter;
+       }
 
-       if (*command & FTRACE_UPDATE_TRACE_FUNC)
-               ftrace_update_ftrace_func(ftrace_trace_function);
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_start(m, pos);
 
-       if (*command & FTRACE_START_FUNC_RET)
-               ftrace_enable_ftrace_graph_caller();
-       else if (*command & FTRACE_STOP_FUNC_RET)
-               ftrace_disable_ftrace_graph_caller();
+       iter->pg = ftrace_pages_start;
+       iter->idx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_next(m, p, &l);
+               if (!p)
+                       break;
+       }
 
-       return 0;
+       if (!p && iter->flags & FTRACE_ITER_FILTER)
+               return t_hash_start(m, pos);
+
+       return p;
 }
 
-static void ftrace_run_update_code(int command)
+static void t_stop(struct seq_file *m, void *p)
 {
-       stop_machine(__ftrace_modify_code, &command, NULL);
+       mutex_unlock(&ftrace_lock);
 }
 
-static ftrace_func_t saved_ftrace_func;
-static int ftrace_start_up;
-
-static void ftrace_startup_enable(int command)
+static int t_show(struct seq_file *m, void *v)
 {
-       if (saved_ftrace_func != ftrace_trace_function) {
-               saved_ftrace_func = ftrace_trace_function;
-               command |= FTRACE_UPDATE_TRACE_FUNC;
+       struct ftrace_iterator *iter = m->private;
+       struct dyn_ftrace *rec = v;
+
+       if (iter->flags & FTRACE_ITER_HASH)
+               return t_hash_show(m, v);
+
+       if (iter->flags & FTRACE_ITER_PRINTALL) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
        }
 
-       if (!command || !ftrace_enabled)
-               return;
+       if (!rec)
+               return 0;
 
-       ftrace_run_update_code(command);
+       seq_printf(m, "%ps\n", (void *)rec->ip);
+
+       return 0;
 }
 
-static void ftrace_startup(int command)
+static const struct seq_operations show_ftrace_seq_ops = {
+       .start = t_start,
+       .next = t_next,
+       .stop = t_stop,
+       .show = t_show,
+};
+
+static int
+ftrace_avail_open(struct inode *inode, struct file *file)
 {
+       struct ftrace_iterator *iter;
+       int ret;
+
        if (unlikely(ftrace_disabled))
-               return;
+               return -ENODEV;
 
-       mutex_lock(&ftrace_start_lock);
-       ftrace_start_up++;
-       command |= FTRACE_ENABLE_CALLS;
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
 
-       ftrace_startup_enable(command);
+       iter->pg = ftrace_pages_start;
+
+       ret = seq_open(file, &show_ftrace_seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+
+               m->private = iter;
+       } else {
+               kfree(iter);
+       }
 
-       mutex_unlock(&ftrace_start_lock);
+       return ret;
 }
 
-static void ftrace_shutdown(int command)
+static int
+ftrace_failures_open(struct inode *inode, struct file *file)
 {
-       if (unlikely(ftrace_disabled))
-               return;
-
-       mutex_lock(&ftrace_start_lock);
-       ftrace_start_up--;
-       if (!ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
+       int ret;
+       struct seq_file *m;
+       struct ftrace_iterator *iter;
 
-       if (saved_ftrace_func != ftrace_trace_function) {
-               saved_ftrace_func = ftrace_trace_function;
-               command |= FTRACE_UPDATE_TRACE_FUNC;
+       ret = ftrace_avail_open(inode, file);
+       if (!ret) {
+               m = (struct seq_file *)file->private_data;
+               iter = (struct ftrace_iterator *)m->private;
+               iter->flags = FTRACE_ITER_FAILURES;
        }
 
-       if (!command || !ftrace_enabled)
-               goto out;
-
-       ftrace_run_update_code(command);
- out:
-       mutex_unlock(&ftrace_start_lock);
+       return ret;
 }
 
-static void ftrace_startup_sysctl(void)
-{
-       int command = FTRACE_ENABLE_MCOUNT;
-
-       if (unlikely(ftrace_disabled))
-               return;
 
-       mutex_lock(&ftrace_start_lock);
-       /* Force update next time */
-       saved_ftrace_func = NULL;
-       /* ftrace_start_up is true if we want ftrace running */
-       if (ftrace_start_up)
-               command |= FTRACE_ENABLE_CALLS;
+static void ftrace_filter_reset(int enable)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 
-       ftrace_run_update_code(command);
-       mutex_unlock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
+       if (enable)
+               ftrace_filtered = 0;
+       do_for_each_ftrace_rec(pg, rec) {
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+               rec->flags &= ~type;
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
 }
 
-static void ftrace_shutdown_sysctl(void)
+static int
+ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 {
-       int command = FTRACE_DISABLE_MCOUNT;
+       struct ftrace_iterator *iter;
+       int ret = 0;
 
        if (unlikely(ftrace_disabled))
-               return;
+               return -ENODEV;
 
-       mutex_lock(&ftrace_start_lock);
-       /* ftrace_start_up is true if ftrace is running */
-       if (ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
 
-       ftrace_run_update_code(command);
-       mutex_unlock(&ftrace_start_lock);
-}
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
+               kfree(iter);
+               return -ENOMEM;
+       }
 
-static cycle_t         ftrace_update_time;
-static unsigned long   ftrace_update_cnt;
-unsigned long          ftrace_update_tot_cnt;
+       mutex_lock(&ftrace_regex_lock);
+       if ((file->f_mode & FMODE_WRITE) &&
+           (file->f_flags & O_TRUNC))
+               ftrace_filter_reset(enable);
 
-static int ftrace_update_code(struct module *mod)
-{
-       struct dyn_ftrace *p, *t;
-       cycle_t start, stop;
+       if (file->f_mode & FMODE_READ) {
+               iter->pg = ftrace_pages_start;
+               iter->flags = enable ? FTRACE_ITER_FILTER :
+                       FTRACE_ITER_NOTRACE;
 
-       start = ftrace_now(raw_smp_processor_id());
-       ftrace_update_cnt = 0;
+               ret = seq_open(file, &show_ftrace_seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = iter;
+               } else
+                       kfree(iter);
+       } else
+               file->private_data = iter;
+       mutex_unlock(&ftrace_regex_lock);
 
-       list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
+       return ret;
+}
 
-               /* If something went wrong, bail without enabling anything */
-               if (unlikely(ftrace_disabled))
-                       return -1;
+static int
+ftrace_filter_open(struct inode *inode, struct file *file)
+{
+       return ftrace_regex_open(inode, file, 1);
+}
 
-               list_del_init(&p->list);
+static int
+ftrace_notrace_open(struct inode *inode, struct file *file)
+{
+       return ftrace_regex_open(inode, file, 0);
+}
 
-               /* convert record (i.e, patch mcount-call with NOP) */
-               if (ftrace_code_disable(mod, p)) {
-                       p->flags |= FTRACE_FL_CONVERTED;
-                       ftrace_update_cnt++;
-               } else
-                       ftrace_free_rec(p);
-       }
+static loff_t
+ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
+{
+       loff_t ret;
 
-       stop = ftrace_now(raw_smp_processor_id());
-       ftrace_update_time = stop - start;
-       ftrace_update_tot_cnt += ftrace_update_cnt;
+       if (file->f_mode & FMODE_READ)
+               ret = seq_lseek(file, offset, origin);
+       else
+               file->f_pos = ret = 1;
 
-       return 0;
+       return ret;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
+enum {
+       MATCH_FULL,
+       MATCH_FRONT_ONLY,
+       MATCH_MIDDLE_ONLY,
+       MATCH_END_ONLY,
+};
+
+/*
+ * (static function - no need for kernel doc)
+ *
+ * Pass in a buffer containing a glob and this function will
+ * set search to point to the search part of the buffer and
+ * return the type of search it is (see enum above).
+ * This does modify buff.
+ *
+ * Returns enum type.
+ *  search returns the pointer to use for comparison.
+ *  not returns 1 if buff started with a '!'
+ *     0 otherwise.
+ */
+static int
+ftrace_setup_glob(char *buff, int len, char **search, int *not)
 {
-       struct ftrace_page *pg;
-       int cnt;
+       int type = MATCH_FULL;
        int i;
 
-       /* allocate a few pages */
-       ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
-       if (!ftrace_pages_start)
-               return -1;
-
-       /*
-        * Allocate a few more pages.
-        *
-        * TODO: have some parser search vmlinux before
-        *   final linking to find all calls to ftrace.
-        *   Then we can:
-        *    a) know how many pages to allocate.
-        *     and/or
-        *    b) set up the table then.
-        *
-        *  The dynamic code is still necessary for
-        *  modules.
-        */
+       if (buff[0] == '!') {
+               *not = 1;
+               buff++;
+               len--;
+       } else
+               *not = 0;
 
-       pg = ftrace_pages = ftrace_pages_start;
+       *search = buff;
 
-       cnt = num_to_init / ENTRIES_PER_PAGE;
-       pr_info("ftrace: allocating %ld entries in %d pages\n",
-               num_to_init, cnt + 1);
+       for (i = 0; i < len; i++) {
+               if (buff[i] == '*') {
+                       if (!i) {
+                               *search = buff + 1;
+                               type = MATCH_END_ONLY;
+                       } else {
+                               if (type == MATCH_END_ONLY)
+                                       type = MATCH_MIDDLE_ONLY;
+                               else
+                                       type = MATCH_FRONT_ONLY;
+                               buff[i] = 0;
+                               break;
+                       }
+               }
+       }
 
-       for (i = 0; i < cnt; i++) {
-               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+       return type;
+}
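
A short sketch of what the decoder yields; these buffers are illustrative
only, not taken from the patch:

	{
		char buf[] = "*rcu*";	/* assumed input */
		char *search;
		int not, type;

		type = ftrace_setup_glob(buf, strlen(buf), &search, &not);
		/* type == MATCH_MIDDLE_ONLY, search == "rcu", not == 0 */
	}
	/*
	 * Other assumed inputs:
	 *   "sched*"  ->  MATCH_FRONT_ONLY, search = "sched", not = 0
	 *   "*_lock"  ->  MATCH_END_ONLY,   search = "_lock", not = 0
	 *   "!kfree"  ->  MATCH_FULL,       search = "kfree", not = 1
	 */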
 
-               /* If we fail, we'll try later anyway */
-               if (!pg->next)
-                       break;
+static int ftrace_match(char *str, char *regex, int len, int type)
+{
+       int matched = 0;
+       char *ptr;
 
-               pg = pg->next;
+       switch (type) {
+       case MATCH_FULL:
+               if (strcmp(str, regex) == 0)
+                       matched = 1;
+               break;
+       case MATCH_FRONT_ONLY:
+               if (strncmp(str, regex, len) == 0)
+                       matched = 1;
+               break;
+       case MATCH_MIDDLE_ONLY:
+               if (strstr(str, regex))
+                       matched = 1;
+               break;
+       case MATCH_END_ONLY:
+               ptr = strstr(str, regex);
+               if (ptr && (ptr[len] == 0))
+                       matched = 1;
+               break;
        }
 
-       return 0;
+       return matched;
 }
 
-enum {
-       FTRACE_ITER_FILTER      = (1 << 0),
-       FTRACE_ITER_CONT        = (1 << 1),
-       FTRACE_ITER_NOTRACE     = (1 << 2),
-       FTRACE_ITER_FAILURES    = (1 << 3),
-};
-
-#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
+static int
+ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
+{
+       char str[KSYM_SYMBOL_LEN];
 
-struct ftrace_iterator {
-       struct ftrace_page      *pg;
-       unsigned                idx;
-       unsigned                flags;
-       unsigned char           buffer[FTRACE_BUFF_MAX+1];
-       unsigned                buffer_idx;
-       unsigned                filtered;
-};
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       return ftrace_match(str, regex, len, type);
+}
 
-static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+static void ftrace_match_records(char *buff, int len, int enable)
 {
-       struct ftrace_iterator *iter = m->private;
-       struct dyn_ftrace *rec = NULL;
-
-       (*pos)++;
+       unsigned int search_len;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       unsigned long flag;
+       char *search;
+       int type;
+       int not;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
- retry:
-       if (iter->idx >= iter->pg->index) {
-               if (iter->pg->next) {
-                       iter->pg = iter->pg->next;
-                       iter->idx = 0;
-                       goto retry;
-               } else {
-                       iter->idx = -1;
-               }
-       } else {
-               rec = &iter->pg->records[iter->idx++];
-               if ((rec->flags & FTRACE_FL_FREE) ||
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+       type = ftrace_setup_glob(buff, len, &search, &not);
 
-                   (!(iter->flags & FTRACE_ITER_FAILURES) &&
-                    (rec->flags & FTRACE_FL_FAILED)) ||
+       search_len = strlen(search);
 
-                   ((iter->flags & FTRACE_ITER_FAILURES) &&
-                    !(rec->flags & FTRACE_FL_FAILED)) ||
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
 
-                   ((iter->flags & FTRACE_ITER_FILTER) &&
-                    !(rec->flags & FTRACE_FL_FILTER)) ||
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
 
-                   ((iter->flags & FTRACE_ITER_NOTRACE) &&
-                    !(rec->flags & FTRACE_FL_NOTRACE))) {
-                       rec = NULL;
-                       goto retry;
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       if (not)
+                               rec->flags &= ~flag;
+                       else
+                               rec->flags |= flag;
                }
-       }
-       spin_unlock(&ftrace_lock);
-
-       return rec;
+               /*
+                * Only enable filtering if we have a function that
+                * is filtered on.
+                */
+               if (enable && (rec->flags & FTRACE_FL_FILTER))
+                       ftrace_filtered = 1;
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
 }
 
-static void *t_start(struct seq_file *m, loff_t *pos)
+static int
+ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
+                          char *regex, int len, int type)
 {
-       struct ftrace_iterator *iter = m->private;
-       void *p = NULL;
+       char str[KSYM_SYMBOL_LEN];
+       char *modname;
 
-       if (*pos > 0) {
-               if (iter->idx < 0)
-                       return p;
-               (*pos)--;
-               iter->idx--;
-       }
+       kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
 
-       p = t_next(m, p, pos);
+       if (!modname || strcmp(modname, mod))
+               return 0;
 
-       return p;
+       /* blank search means to match all funcs in the mod */
+       if (len)
+               return ftrace_match(str, regex, len, type);
+       else
+               return 1;
 }
 
-static void t_stop(struct seq_file *m, void *p)
+static void ftrace_match_module_records(char *buff, char *mod, int enable)
 {
+       unsigned search_len = 0;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int type = MATCH_FULL;
+       char *search = buff;
+       unsigned long flag;
+       int not = 0;
+
+       flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
+       /* blank or '*' mean the same */
+       if (strcmp(buff, "*") == 0)
+               buff[0] = 0;
+
+       /* handle the case of 'don't filter this module' */
+       if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
+               buff[0] = 0;
+               not = 1;
+       }
+
+       if (strlen(buff)) {
+               type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+               search_len = strlen(search);
+       }
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (ftrace_match_module_record(rec, mod,
+                                              search, search_len, type)) {
+                       if (not)
+                               rec->flags &= ~flag;
+                       else
+                               rec->flags |= flag;
+               }
+               if (enable && (rec->flags & FTRACE_FL_FILTER))
+                       ftrace_filtered = 1;
+
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
 }
 
-static int t_show(struct seq_file *m, void *v)
+/*
+ * We register the module command as a template to show others how
+ * to register a command as well.
+ */
+
+static int
+ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
 {
-       struct dyn_ftrace *rec = v;
-       char str[KSYM_SYMBOL_LEN];
+       char *mod;
 
-       if (!rec)
-               return 0;
+       /*
+        * cmd == 'mod' because we only registered this func
+        * for the 'mod' ftrace_func_command.
+        * But if you register one func with multiple commands,
+        * you can tell which command was used by the cmd
+        * parameter.
+        */
 
-       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       /* we must have a module name */
+       if (!param)
+               return -EINVAL;
 
-       seq_printf(m, "%s\n", str);
+       mod = strsep(&param, ":");
+       if (!strlen(mod))
+               return -EINVAL;
 
+       ftrace_match_module_records(func, mod, enable);
        return 0;
 }
 
-static struct seq_operations show_ftrace_seq_ops = {
-       .start = t_start,
-       .next = t_next,
-       .stop = t_stop,
-       .show = t_show,
+static struct ftrace_func_command ftrace_mod_cmd = {
+       .name                   = "mod",
+       .func                   = ftrace_mod_callback,
 };
 
-static int
-ftrace_avail_open(struct inode *inode, struct file *file)
+static int __init ftrace_mod_cmd_init(void)
 {
-       struct ftrace_iterator *iter;
-       int ret;
+       return register_ftrace_command(&ftrace_mod_cmd);
+}
+device_initcall(ftrace_mod_cmd_init);
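
Following that template, a command of one's own would look roughly like
this; the "foo" name and callback are hypothetical, not part of the patch:

	static int foo_callback(char *func, char *cmd, char *param, int enable)
	{
		/* invoked for writes of the form "<glob>:foo[:<param>]" */
		return 0;
	}

	static struct ftrace_func_command foo_cmd = {
		.name			= "foo",
		.func			= foo_callback,
	};

	static int __init foo_cmd_init(void)
	{
		return register_ftrace_command(&foo_cmd);
	}
	device_initcall(foo_cmd_init);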
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
+static void
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_func_probe *entry;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
+       int resched;
 
-       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-       if (!iter)
-               return -ENOMEM;
+       key = hash_long(ip, FTRACE_HASH_BITS);
 
-       iter->pg = ftrace_pages_start;
+       hhd = &ftrace_func_hash[key];
 
-       ret = seq_open(file, &show_ftrace_seq_ops);
-       if (!ret) {
-               struct seq_file *m = file->private_data;
+       if (hlist_empty(hhd))
+               return;
 
-               m->private = iter;
-       } else {
-               kfree(iter);
+       /*
+        * Disable preemption for these calls to prevent an RCU grace
+        * period. This syncs the hash iteration and freeing of items
+        * on the hash. rcu_read_lock is too dangerous here.
+        */
+       resched = ftrace_preempt_disable();
+       hlist_for_each_entry_rcu(entry, n, hhd, node) {
+               if (entry->ip == ip)
+                       entry->ops->func(ip, parent_ip, &entry->data);
        }
-
-       return ret;
+       ftrace_preempt_enable(resched);
 }
 
-int ftrace_avail_release(struct inode *inode, struct file *file)
+static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-       struct seq_file *m = (struct seq_file *)file->private_data;
-       struct ftrace_iterator *iter = m->private;
+       .func           = function_trace_probe_call,
+};
 
-       seq_release(inode, file);
-       kfree(iter);
+static int ftrace_probe_registered;
 
-       return 0;
+static void __enable_ftrace_function_probe(void)
+{
+       int i;
+
+       if (ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       break;
+       }
+       /* Nothing registered? */
+       if (i == FTRACE_FUNC_HASHSIZE)
+               return;
+
+       __register_ftrace_function(&trace_probe_ops);
+       ftrace_startup(0);
+       ftrace_probe_registered = 1;
 }
 
-static int
-ftrace_failures_open(struct inode *inode, struct file *file)
+static void __disable_ftrace_function_probe(void)
 {
-       int ret;
-       struct seq_file *m;
-       struct ftrace_iterator *iter;
+       int i;
 
-       ret = ftrace_avail_open(inode, file);
-       if (!ret) {
-               m = (struct seq_file *)file->private_data;
-               iter = (struct ftrace_iterator *)m->private;
-               iter->flags = FTRACE_ITER_FAILURES;
+       if (!ftrace_probe_registered)
+               return;
+
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
+               if (hhd->first)
+                       return;
        }
 
-       return ret;
+       /* no more funcs left */
+       __unregister_ftrace_function(&trace_probe_ops);
+       ftrace_shutdown(0);
+       ftrace_probe_registered = 0;
 }
 
 
-static void ftrace_filter_reset(int enable)
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+       struct ftrace_func_probe *entry =
+               container_of(rhp, struct ftrace_func_probe, rcu);
+
+       if (entry->ops->free)
+               entry->ops->free(&entry->data);
+       kfree(entry);
+}
+
+int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                             void *data)
 {
+       struct ftrace_func_probe *entry;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
-       unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned i;
+       int type, len, not;
+       unsigned long key;
+       int count = 0;
+       char *search;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-       if (enable)
-               ftrace_filtered = 0;
-       pg = ftrace_pages_start;
-       while (pg) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
-                       if (rec->flags & FTRACE_FL_FAILED)
+       type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+       len = strlen(search);
+
+       /* we do not support '!' for function probes */
+       if (WARN_ON(not))
+               return -EINVAL;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (rec->flags & FTRACE_FL_FAILED)
+                       continue;
+
+               if (!ftrace_match_record(rec, search, len, type))
+                       continue;
+
+               entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry) {
+                       /* If we did not process any, then return error */
+                       if (!count)
+                               count = -ENOMEM;
+                       goto out_unlock;
+               }
+
+               count++;
+
+               entry->data = data;
+
+               /*
+                * The caller might want to do something special
+                * for each function we find. We call the callback
+                * to give the caller an opportunity to do so.
+                */
+               if (ops->callback) {
+                       if (ops->callback(rec->ip, &entry->data) < 0) {
+                               /* caller does not like this func */
+                               kfree(entry);
                                continue;
-                       rec->flags &= ~type;
+                       }
                }
-               pg = pg->next;
-       }
-       spin_unlock(&ftrace_lock);
+
+               entry->ops = ops;
+               entry->ip = rec->ip;
+
+               key = hash_long(entry->ip, FTRACE_HASH_BITS);
+               hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+       } while_for_each_ftrace_rec();
+       __enable_ftrace_function_probe();
+
+ out_unlock:
+       mutex_unlock(&ftrace_lock);
+
+       return count;
 }
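
A minimal usage sketch, assuming hypothetical my_probe_func/my_probe_ops;
the probe fires on every traced call of a matching function, and the
return value is the number of functions hooked or a negative errno:

	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
				  void **data)
	{
		/* runs in function-trace context: keep it minimal */
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func	= my_probe_func,
	};

	static int __init my_probe_init(void)
	{
		int nr;

		nr = register_ftrace_function_probe("schedule*",
						    &my_probe_ops, NULL);
		return nr < 0 ? nr : 0;
	}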
 
-static int
-ftrace_regex_open(struct inode *inode, struct file *file, int enable)
+enum {
+       PROBE_TEST_FUNC         = 1,
+       PROBE_TEST_DATA         = 2
+};
+
+static void
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                                 void *data, int flags)
 {
-       struct ftrace_iterator *iter;
-       int ret = 0;
+       struct ftrace_func_probe *entry;
+       struct hlist_node *n, *tmp;
+       char str[KSYM_SYMBOL_LEN];
+       int type = MATCH_FULL;
+       int i, len = 0;
+       char *search;
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
+       if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+               glob = NULL;
+       else if (glob) {
+               int not;
 
-       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-       if (!iter)
-               return -ENOMEM;
+               type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+               len = strlen(search);
 
-       mutex_lock(&ftrace_regex_lock);
-       if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND))
-               ftrace_filter_reset(enable);
+               /* we do not support '!' for function probes */
+               if (WARN_ON(not))
+                       return;
+       }
 
-       if (file->f_mode & FMODE_READ) {
-               iter->pg = ftrace_pages_start;
-               iter->flags = enable ? FTRACE_ITER_FILTER :
-                       FTRACE_ITER_NOTRACE;
+       mutex_lock(&ftrace_lock);
+       for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+               struct hlist_head *hhd = &ftrace_func_hash[i];
 
-               ret = seq_open(file, &show_ftrace_seq_ops);
-               if (!ret) {
-                       struct seq_file *m = file->private_data;
-                       m->private = iter;
-               } else
-                       kfree(iter);
-       } else
-               file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+               hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
 
-       return ret;
+                       /* break up if statements for readability */
+                       if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
+                               continue;
+
+                       if ((flags & PROBE_TEST_DATA) && entry->data != data)
+                               continue;
+
+                       /* do this last, since it is the most expensive */
+                       if (glob) {
+                               kallsyms_lookup(entry->ip, NULL, NULL,
+                                               NULL, str);
+                               if (!ftrace_match(str, glob, len, type))
+                                       continue;
+                       }
+
+                       hlist_del(&entry->node);
+                       call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+               }
+       }
+       __disable_ftrace_function_probe();
+       mutex_unlock(&ftrace_lock);
 }
 
-static int
-ftrace_filter_open(struct inode *inode, struct file *file)
+void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+                               void *data)
 {
-       return ftrace_regex_open(inode, file, 1);
+       __unregister_ftrace_function_probe(glob, ops, data,
+                                         PROBE_TEST_FUNC | PROBE_TEST_DATA);
 }
 
-static int
-ftrace_notrace_open(struct inode *inode, struct file *file)
+void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
 {
-       return ftrace_regex_open(inode, file, 0);
+       __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+}
+
+void unregister_ftrace_function_probe_all(char *glob)
+{
+       __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
 }
 
-static ssize_t
-ftrace_regex_read(struct file *file, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
-{
-       if (file->f_mode & FMODE_READ)
-               return seq_read(file, ubuf, cnt, ppos);
-       else
-               return -EPERM;
+static LIST_HEAD(ftrace_commands);
+static DEFINE_MUTEX(ftrace_cmd_mutex);
+
+int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+       struct ftrace_func_command *p;
+       int ret = 0;
+
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry(p, &ftrace_commands, list) {
+               if (strcmp(cmd->name, p->name) == 0) {
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
+       }
+       list_add(&cmd->list, &ftrace_commands);
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
+
+       return ret;
 }
 
-static loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
+int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
-       loff_t ret;
+       struct ftrace_func_command *p, *n;
+       int ret = -ENODEV;
 
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, origin);
-       else
-               file->f_pos = ret = 1;
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry_safe(p, n, &ftrace_commands, list) {
+               if (strcmp(cmd->name, p->name) == 0) {
+                       ret = 0;
+                       list_del_init(&p->list);
+                       goto out_unlock;
+               }
+       }
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
 
        return ret;
 }
 
-enum {
-       MATCH_FULL,
-       MATCH_FRONT_ONLY,
-       MATCH_MIDDLE_ONLY,
-       MATCH_END_ONLY,
-};
-
-static void
-ftrace_match(unsigned char *buff, int len, int enable)
+static int ftrace_process_regex(char *buff, int len, int enable)
 {
-       char str[KSYM_SYMBOL_LEN];
-       char *search = NULL;
-       struct ftrace_page *pg;
-       struct dyn_ftrace *rec;
-       int type = MATCH_FULL;
-       unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-       unsigned i, match = 0, search_len = 0;
+       char *func, *command, *next = buff;
+       struct ftrace_func_command *p;
+       int ret = -EINVAL;
 
-       for (i = 0; i < len; i++) {
-               if (buff[i] == '*') {
-                       if (!i) {
-                               search = buff + i + 1;
-                               type = MATCH_END_ONLY;
-                               search_len = len - (i + 1);
-                       } else {
-                               if (type == MATCH_END_ONLY) {
-                                       type = MATCH_MIDDLE_ONLY;
-                               } else {
-                                       match = i;
-                                       type = MATCH_FRONT_ONLY;
-                               }
-                               buff[i] = 0;
-                               break;
-                       }
-               }
+       func = strsep(&next, ":");
+
+       if (!next) {
+               ftrace_match_records(func, len, enable);
+               return 0;
        }
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
-       if (enable)
-               ftrace_filtered = 1;
-       pg = ftrace_pages_start;
-       while (pg) {
-               for (i = 0; i < pg->index; i++) {
-                       int matched = 0;
-                       char *ptr;
+       /* command found */
 
-                       rec = &pg->records[i];
-                       if (rec->flags & FTRACE_FL_FAILED)
-                               continue;
-                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-                       switch (type) {
-                       case MATCH_FULL:
-                               if (strcmp(str, buff) == 0)
-                                       matched = 1;
-                               break;
-                       case MATCH_FRONT_ONLY:
-                               if (memcmp(str, buff, match) == 0)
-                                       matched = 1;
-                               break;
-                       case MATCH_MIDDLE_ONLY:
-                               if (strstr(str, search))
-                                       matched = 1;
-                               break;
-                       case MATCH_END_ONLY:
-                               ptr = strstr(str, search);
-                               if (ptr && (ptr[search_len] == 0))
-                                       matched = 1;
-                               break;
-                       }
-                       if (matched)
-                               rec->flags |= flag;
+       command = strsep(&next, ":");
+
+       mutex_lock(&ftrace_cmd_mutex);
+       list_for_each_entry(p, &ftrace_commands, list) {
+               if (strcmp(p->name, command) == 0) {
+                       ret = p->func(func, command, next, enable);
+                       goto out_unlock;
                }
-               pg = pg->next;
        }
-       spin_unlock(&ftrace_lock);
+ out_unlock:
+       mutex_unlock(&ftrace_cmd_mutex);
+
+       return ret;
 }
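
How a few assumed writes to set_ftrace_filter are split by the code above:

	/*
	 *   "kfree"          -> no ':' found, plain ftrace_match_records()
	 *   ":mod:ext3"      -> func = "", command = "mod", param = "ext3"
	 *                       (mod callback filters every function in ext3)
	 *   "*lock*:mod:snd" -> glob "*lock*" restricted to module "snd"
	 */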
 
 static ssize_t
@@ -1113,9 +2199,8 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
 {
        struct ftrace_iterator *iter;
-       char ch;
-       size_t read = 0;
-       ssize_t ret;
+       struct trace_parser *parser;
+       ssize_t ret, read;
 
        if (!cnt || cnt < 0)
                return 0;
@@ -1128,65 +2213,23 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        } else
                iter = file->private_data;
 
-       if (!*ppos) {
-               iter->flags &= ~FTRACE_ITER_CONT;
-               iter->buffer_idx = 0;
-       }
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
-               goto out;
-       read++;
-       cnt--;
-
-       if (!(iter->flags & ~FTRACE_ITER_CONT)) {
-               /* skip white space */
-               while (cnt && isspace(ch)) {
-                       ret = get_user(ch, ubuf++);
-                       if (ret)
-                               goto out;
-                       read++;
-                       cnt--;
-               }
-
-               if (isspace(ch)) {
-                       file->f_pos += read;
-                       ret = read;
-                       goto out;
-               }
-
-               iter->buffer_idx = 0;
-       }
+       parser = &iter->parser;
+       read = trace_get_user(parser, ubuf, cnt, ppos);
 
-       while (cnt && !isspace(ch)) {
-               if (iter->buffer_idx < FTRACE_BUFF_MAX)
-                       iter->buffer[iter->buffer_idx++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
+       if (trace_parser_loaded(parser) &&
+           !trace_parser_cont(parser)) {
+               ret = ftrace_process_regex(parser->buffer,
+                                          parser->idx, enable);
                if (ret)
                        goto out;
-               read++;
-               cnt--;
-       }
-
-       if (isspace(ch)) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match(iter->buffer, iter->buffer_idx, enable);
-               iter->buffer_idx = 0;
-       } else
-               iter->flags |= FTRACE_ITER_CONT;
-
 
-       file->f_pos += read;
+               trace_parser_clear(parser);
+       }
 
        ret = read;
- out:
-       mutex_unlock(&ftrace_regex_lock);
 
+       mutex_unlock(&ftrace_regex_lock);
+out:
        return ret;
 }
 
@@ -1214,7 +2257,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
-               ftrace_match(buf, len, enable);
+               ftrace_match_records(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
 }
 
@@ -1247,11 +2290,51 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
        ftrace_set_regex(buf, len, reset, 0);
 }
 
+/*
+ * command line interface to allow users to set filters on boot up.
+ */
+#define FTRACE_FILTER_SIZE             COMMAND_LINE_SIZE
+static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+
+static int __init set_ftrace_notrace(char *str)
+{
+       strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_notrace=", set_ftrace_notrace);
+
+static int __init set_ftrace_filter(char *str)
+{
+       strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_filter=", set_ftrace_filter);
+
+static void __init set_ftrace_early_filter(char *buf, int enable)
+{
+       char *func;
+
+       while (buf) {
+               func = strsep(&buf, ",");
+               ftrace_set_regex(func, strlen(func), 0, enable);
+       }
+}
+
+static void __init set_ftrace_early_filters(void)
+{
+       if (ftrace_filter_buf[0])
+               set_ftrace_early_filter(ftrace_filter_buf, 1);
+       if (ftrace_notrace_buf[0])
+               set_ftrace_early_filter(ftrace_notrace_buf, 0);
+}
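
For example, an assumed kernel command line using the new parameters:

	/*
	 *   ftrace_filter=kmem_cache_*,kfree ftrace_notrace=*spin_lock*
	 *
	 * Each comma-separated entry is handed to ftrace_set_regex() from
	 * ftrace_init(), once the function records have been set up.
	 */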
+
 static int
 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;
+       struct trace_parser *parser;
 
        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
@@ -1261,20 +2344,20 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
        } else
                iter = file->private_data;
 
-       if (iter->buffer_idx) {
-               iter->filtered++;
-               iter->buffer[iter->buffer_idx] = 0;
-               ftrace_match(iter->buffer, iter->buffer_idx, enable);
+       parser = &iter->parser;
+       if (trace_parser_loaded(parser)) {
+               parser->buffer[parser->idx] = 0;
+               ftrace_match_records(parser->buffer, parser->idx, enable);
        }
 
-       mutex_lock(&ftrace_sysctl_lock);
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        if (ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-       mutex_unlock(&ftrace_start_lock);
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
 
+       trace_parser_put(parser);
        kfree(iter);
+
        mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
@@ -1291,31 +2374,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
        return ftrace_regex_release(inode, file, 0);
 }
 
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = ftrace_avail_release,
+       .release = seq_release_private,
 };
 
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
-       .read = ftrace_regex_read,
+       .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
 };
 
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
-       .read = ftrace_regex_read,
+       .read = seq_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
@@ -1329,28 +2412,29 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
-       unsigned long *array = m->private;
-       int index = *pos;
-
-       (*pos)++;
-
-       if (index >= ftrace_graph_count)
+       if (*pos >= ftrace_graph_count)
                return NULL;
+       return &ftrace_graph_funcs[*pos];
+}
 
-       return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-       void *p = NULL;
-
        mutex_lock(&graph_lock);
 
-       p = g_next(m, p, pos);
+       /* Nothing to show; tell g_show to print that all functions are enabled */
+       if (!ftrace_graph_count && !*pos)
+               return (void *)1;
 
-       return p;
+       return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
@@ -1361,19 +2445,21 @@ static void g_stop(struct seq_file *m, void *p)
 static int g_show(struct seq_file *m, void *v)
 {
        unsigned long *ptr = v;
-       char str[KSYM_SYMBOL_LEN];
 
        if (!ptr)
                return 0;
 
-       kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+       if (ptr == (unsigned long *)1) {
+               seq_printf(m, "#### all functions enabled ####\n");
+               return 0;
+       }
 
-       seq_printf(m, "%s\n", str);
+       seq_printf(m, "%ps\n", (void *)*ptr);
 
        return 0;
 }
 
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
        .start = g_start,
        .next = g_next,
        .stop = g_stop,
@@ -1390,71 +2476,73 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND)) {
+           (file->f_flags & O_TRUNC)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }
+       mutex_unlock(&graph_lock);
 
-       if (file->f_mode & FMODE_READ) {
+       if (file->f_mode & FMODE_READ)
                ret = seq_open(file, &ftrace_graph_seq_ops);
-               if (!ret) {
-                       struct seq_file *m = file->private_data;
-                       m->private = ftrace_graph_funcs;
-               }
-       } else
-               file->private_data = ftrace_graph_funcs;
-       mutex_unlock(&graph_lock);
 
        return ret;
 }
 
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
+static int
+ftrace_graph_release(struct inode *inode, struct file *file)
 {
        if (file->f_mode & FMODE_READ)
-               return seq_read(file, ubuf, cnt, ppos);
-       else
-               return -EPERM;
+               seq_release(inode, file);
+       return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-       char str[KSYM_SYMBOL_LEN];
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int search_len;
        int found = 0;
-       int i, j;
+       int type, not;
+       char *search;
+       bool exists;
+       int i;
 
        if (ftrace_disabled)
                return -ENODEV;
 
-       /* should not be called from interrupt context */
-       spin_lock(&ftrace_lock);
+       /* decode regex */
+       type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+       if (not)
+               return -EINVAL;
 
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       rec = &pg->records[i];
+       search_len = strlen(search);
 
-                       if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
-                               continue;
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+
+               if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                       break;
 
-                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-                       if (strcmp(str, buffer) == 0) {
+               if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+                       continue;
+
+               if (ftrace_match_record(rec, search, search_len, type)) {
+                       /* ensure it is not already in the array */
+                       exists = false;
+                       for (i = 0; i < *idx; i++)
+                               if (array[i] == rec->ip) {
+                                       exists = true;
+                                       break;
+                               }
+                       if (!exists) {
+                               array[(*idx)++] = rec->ip;
                                found = 1;
-                               for (j = 0; j < idx; j++)
-                                       if (array[j] == rec->ip) {
-                                               found = 0;
-                                               break;
-                                       }
-                               if (found)
-                                       array[idx] = rec->ip;
-                               break;
                        }
                }
-       }
-       spin_unlock(&ftrace_lock);
+       } while_for_each_ftrace_rec();
+
+       mutex_unlock(&ftrace_lock);
 
        return found ? 0 : -EINVAL;
 }
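
For instance, with an assumed (writable) input buffer:

	char buf[] = "sys_*";
	ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, buf);
	/* appends the ip of each matching record, skipping duplicates,
	 * up to FTRACE_GRAPH_MAX_FUNCS */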
@@ -1463,12 +2551,9 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
-       unsigned char buffer[FTRACE_BUFF_MAX+1];
-       unsigned long *array;
+       struct trace_parser parser;
        size_t read = 0;
        ssize_t ret;
-       int index = 0;
-       char ch;
 
        if (!cnt || cnt < 0)
                return 0;
@@ -1480,105 +2565,58 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
                goto out;
        }
 
-       if (file->f_mode & FMODE_READ) {
-               struct seq_file *m = file->private_data;
-               array = m->private;
-       } else
-               array = file->private_data;
-
-       ret = get_user(ch, ubuf++);
-       if (ret)
+       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
+               ret = -ENOMEM;
                goto out;
-       read++;
-       cnt--;
-
-       /* skip white space */
-       while (cnt && isspace(ch)) {
-               ret = get_user(ch, ubuf++);
-               if (ret)
-                       goto out;
-               read++;
-               cnt--;
        }
 
-       if (isspace(ch)) {
-               *ppos += read;
-               ret = read;
-               goto out;
-       }
+       read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-       while (cnt && !isspace(ch)) {
-               if (index < FTRACE_BUFF_MAX)
-                       buffer[index++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               ret = get_user(ch, ubuf++);
+       if (trace_parser_loaded((&parser))) {
+               parser.buffer[parser.idx] = 0;
+
+               /* we allow only one expression at a time */
+               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+                                       parser.buffer);
                if (ret)
                        goto out;
-               read++;
-               cnt--;
        }
-       buffer[index] = 0;
-
-       /* we allow only one at a time */
-       ret = ftrace_set_func(array, ftrace_graph_count, buffer);
-       if (ret)
-               goto out;
-
-       ftrace_graph_count++;
-
-       file->f_pos += read;
 
        ret = read;
  out:
+       trace_parser_put(&parser);
        mutex_unlock(&graph_lock);
 
        return ret;
 }
 
 static const struct file_operations ftrace_graph_fops = {
-       .open = ftrace_graph_open,
-       .read = ftrace_graph_read,
-       .write = ftrace_graph_write,
+       .open           = ftrace_graph_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *entry;
 
-       entry = debugfs_create_file("available_filter_functions", 0444,
-                                   d_tracer, NULL, &ftrace_avail_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_filter_functions' entry\n");
+       trace_create_file("available_filter_functions", 0444,
+                       d_tracer, NULL, &ftrace_avail_fops);
 
-       entry = debugfs_create_file("failures", 0444,
-                                   d_tracer, NULL, &ftrace_failures_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'failures' entry\n");
+       trace_create_file("failures", 0444,
+                       d_tracer, NULL, &ftrace_failures_fops);
 
-       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-                                   NULL, &ftrace_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_filter' entry\n");
+       trace_create_file("set_ftrace_filter", 0644, d_tracer,
+                       NULL, &ftrace_filter_fops);
 
-       entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_notrace' entry\n");
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -1592,7 +2630,7 @@ static int ftrace_convert_nops(struct module *mod,
        unsigned long addr;
        unsigned long flags;
 
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
@@ -1611,19 +2649,77 @@ static int ftrace_convert_nops(struct module *mod,
        local_irq_save(flags);
        ftrace_update_code(mod);
        local_irq_restore(flags);
-       mutex_unlock(&ftrace_start_lock);
+       mutex_unlock(&ftrace_lock);
 
        return 0;
 }
 
-void ftrace_init_module(struct module *mod,
-                       unsigned long *start, unsigned long *end)
+#ifdef CONFIG_MODULES
+void ftrace_release(void *start, void *end)
+{
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+       unsigned long s = (unsigned long)start;
+       unsigned long e = (unsigned long)end;
+
+       if (ftrace_disabled || !start || start == end)
+               return;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               if ((rec->ip >= s) && (rec->ip < e)) {
+                       /*
+                        * rec->ip is changed in ftrace_free_rec()
+                        * It should not be between s and e if the record was freed.
+                        */
+                       FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+                       ftrace_free_rec(rec);
+               }
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+static void ftrace_init_module(struct module *mod,
+                              unsigned long *start, unsigned long *end)
 {
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(mod, start, end);
 }
 
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ftrace_init_module(mod, mod->ftrace_callsites,
+                                  mod->ftrace_callsites +
+                                  mod->num_ftrace_callsites);
+               break;
+       case MODULE_STATE_GOING:
+               ftrace_release(mod->ftrace_callsites,
+                              mod->ftrace_callsites +
+                              mod->num_ftrace_callsites);
+               break;
+       }
+
+       return 0;
+}
+#else
+static int ftrace_module_notify(struct notifier_block *self,
+                               unsigned long val, void *data)
+{
+       return 0;
+}
+#endif /* CONFIG_MODULES */
+
+struct notifier_block ftrace_module_nb = {
+       .notifier_call = ftrace_module_notify,
+       .priority = 0,
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -1655,6 +2751,12 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
+       ret = register_module_notifier(&ftrace_module_nb);
+       if (ret)
+               pr_warning("Failed to register ftrace module notifier\n");
+
+       set_ftrace_early_filters();
+
        return;
  failed:
        ftrace_disabled = 1;
@@ -1688,7 +2790,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
        if (ftrace_pid_trace == ftrace_swapper_pid)
                r = sprintf(buf, "swapper tasks\n");
        else if (ftrace_pid_trace)
-               r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+               r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
        else
                r = sprintf(buf, "no pid\n");
 
@@ -1725,9 +2827,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                clear_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
+
        put_pid(pid);
 }
 
@@ -1735,9 +2840,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                set_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
@@ -1779,7 +2886,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        if (ret < 0)
                return ret;
 
-       mutex_lock(&ftrace_start_lock);
+       mutex_lock(&ftrace_lock);
        if (val < 0) {
                /* disable pid tracing */
                if (!ftrace_pid_trace)
@@ -1818,12 +2925,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        ftrace_startup_enable(0);
 
  out:
-       mutex_unlock(&ftrace_start_lock);
+       mutex_unlock(&ftrace_lock);
 
        return cnt;
 }
 
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
 };
@@ -1831,7 +2938,6 @@ static struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *entry;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -1839,14 +2945,13 @@ static __init int ftrace_init_debugfs(void)
 
        ftrace_init_dyn_debugfs(d_tracer);
 
-       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-                                   NULL, &ftrace_pid_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_pid' entry\n");
+       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+                           NULL, &ftrace_pid_fops);
+
+       ftrace_profile_debugfs(d_tracer);
+
        return 0;
 }
-
 fs_initcall(ftrace_init_debugfs);
 
 /**
@@ -1881,17 +2986,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
        if (unlikely(ftrace_disabled))
                return -1;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);
        ftrace_startup(0);
 
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
 /**
- * unregister_ftrace_function - unresgister a function for profiling.
+ * unregister_ftrace_function - unregister a function for profiling.
  * @ops - ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
@@ -1900,17 +3005,17 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown(0);
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
 
        return ret;
 }
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-                    struct file *file, void __user *buffer, size_t *lenp,
+                    void __user *buffer, size_t *lenp,
                     loff_t *ppos)
 {
        int ret;
@@ -1918,14 +3023,14 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
-       ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
+       ret  = proc_dointvec(table, write, buffer, lenp, ppos);
 
-       if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+       if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;
 
-       last_ftrace_enabled = ftrace_enabled;
+       last_ftrace_enabled = !!ftrace_enabled;
 
        if (ftrace_enabled) {
 
@@ -1947,13 +3052,14 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        }
 
  out:
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
+static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -1994,11 +3100,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
+                       atomic_set(&t->tracing_graph_pause, 0);
+                       atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
-                       /* Make sure IRQs see the -1 first: */
-                       barrier();
+                       /* Make sure the tasks see the -1 first: */
+                       smp_wmb();
                        t->ret_stack = ret_stack_list[start++];
-                       atomic_set(&t->trace_overrun, 0);
                }
        } while_each_thread(g, t);
 
@@ -2010,11 +3117,43 @@ free:
        return ret;
 }
 
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+                               struct task_struct *next)
+{
+       unsigned long long timestamp;
+       int index;
+
+       /*
+        * Does the user want to count the time a function was asleep?
+        * If so, do not update the time stamps.
+        */
+       if (trace_flags & TRACE_ITER_SLEEP_TIME)
+               return;
+
+       timestamp = trace_clock_local();
+
+       prev->ftrace_timestamp = timestamp;
+
+       /* only process tasks that we timestamped */
+       if (!next->ftrace_timestamp)
+               return;
+
+       /*
+        * Update all the counters in next to make up for the
+        * time next was sleeping.
+        */
+       timestamp -= next->ftrace_timestamp;
+
+       for (index = next->curr_ret_stack; index >= 0; index--)
+               next->ret_stack[index].calltime += timestamp;
+}
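
A worked example with assumed numbers:

	/*
	 * "next" is switched out at t = 100us and back in at t = 160us.
	 * timestamp ends up as 60us, and every calltime on next's return
	 * stack is advanced by 60us, so the sleep is subtracted from the
	 * reported duration (rettime - calltime) of each pending function.
	 */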
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
        struct ftrace_ret_stack **ret_stack_list;
-       int ret;
+       int ret, cpu;
 
        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                sizeof(struct ftrace_ret_stack *),
@@ -2023,25 +3162,68 @@ static int start_graph_tracing(void)
        if (!ret_stack_list)
                return -ENOMEM;
 
+       /* The cpu_boot init_task->ret_stack will never be freed */
+       for_each_online_cpu(cpu) {
+               if (!idle_task(cpu)->ret_stack)
+                       ftrace_graph_init_task(idle_task(cpu));
+       }
+
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);
 
+       if (!ret) {
+               ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+               if (ret)
+                       pr_info("ftrace_graph: Couldn't activate tracepoint"
+                               " probe to kernel_sched_switch\n");
+       }
+
        kfree(ret_stack_list);
        return ret;
 }
 
+/*
+ * Hibernation protection.
+ * The state of the current task is too unstable during
+ * suspend/restore to disk. We want to protect against that.
+ */
+static int
+ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+                                                       void *unused)
+{
+       switch (state) {
+       case PM_HIBERNATION_PREPARE:
+               pause_graph_tracing();
+               break;
+
+       case PM_POST_HIBERNATION:
+               unpause_graph_tracing();
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
 {
        int ret = 0;
 
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
+
+       /* we currently allow only one tracer registered at a time */
+       if (ftrace_graph_active) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+       register_pm_notifier(&ftrace_suspend_notifier);
 
-       atomic_inc(&ftrace_graph_active);
+       ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_graph_active);
+               ftrace_graph_active--;
                goto out;
        }
 
@@ -2051,35 +3233,50 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
-       mutex_unlock(&ftrace_sysctl_lock);
+       mutex_unlock(&ftrace_lock);
        return ret;
 }
 
 void unregister_ftrace_graph(void)
 {
-       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftrace_lock);
 
-       atomic_dec(&ftrace_graph_active);
+       if (unlikely(!ftrace_graph_active))
+               goto out;
+
+       ftrace_graph_active--;
+       unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+       unregister_pm_notifier(&ftrace_suspend_notifier);
 
-       mutex_unlock(&ftrace_sysctl_lock);
+ out:
+       mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_graph_active)) {
-               t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+       /* Make sure we do not use the parent ret_stack */
+       t->ret_stack = NULL;
+
+       if (ftrace_graph_active) {
+               struct ftrace_ret_stack *ret_stack;
+
+               ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
-               if (!t->ret_stack)
+               if (!ret_stack)
                        return;
                t->curr_ret_stack = -1;
+               atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
-       } else
-               t->ret_stack = NULL;
+               t->ftrace_timestamp = 0;
+               /* make curr_ret_stack visible before we add the ret_stack */
+               smp_wmb();
+               t->ret_stack = ret_stack;
+       }
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)