net: skb ftracer - Add config option to enable new ftracer (v3)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2074e5b..1e1d23c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
  again:
-       rec++;
+       if (idx != 0)
+               rec++;
+
        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
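
The idx check fixes an off-by-one in the profiler's stat iteration: the
trace_stat seq_file code rebuilds its cursor by replaying stat_next from
zero, and idx == 0 now asks for the record at the cursor instead of
unconditionally stepping past it, which skipped the first record. A
minimal userspace sketch of that cursor contract (records and stat_next
are illustrative stand-ins, not the ftrace structures):

    #include <stddef.h>

    static int records[] = { 10, 20, 30 };
    #define NRECORDS (sizeof(records) / sizeof(records[0]))

    /* idx == 0 returns the element at the cursor; idx > 0 advances. */
    static int *stat_next(int *rec, int idx)
    {
            if (idx != 0)
                    rec++;
            if (rec >= records + NRECORDS)
                    return NULL;
            return rec;
    }
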
@@ -599,7 +601,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
        local_irq_save(flags);
 
        stat = &__get_cpu_var(ftrace_profile_stats);
-       if (!stat->hash)
+       if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
        rec = ftrace_find_profiled_func(stat, ip);
@@ -630,7 +632,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 
        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
-       if (!stat->hash)
+       if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
        calltime = trace->rettime - trace->calltime;
@@ -724,6 +726,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
+                       /*
+                        * unregister_ftrace_profiler calls stop_machine
+                        * so this acts like a synchronize_sched().
+                        */
                        unregister_ftrace_profiler();
                }
        }
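
Together with the two callback hunks above, this gives a safe disable
sequence: ftrace_profile_enabled is cleared first, the callbacks
re-check it with interrupts off, and unregister_ftrace_profiler() then
runs stop_machine(), which cannot begin until every CPU has left its
current code path. Any callback that already saw the flag set has
therefore finished before teardown continues, the same guarantee
synchronize_sched() provides. A rough userspace analogue of the
clear-then-drain pattern (illustrative names; C11 atomics stand in for
the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool profile_enabled;
    static atomic_int  callbacks_in_flight;

    static void profiler_callback(void)
    {
            atomic_fetch_add(&callbacks_in_flight, 1);
            if (atomic_load(&profile_enabled)) {
                    /* ... record a sample ... */
            }
            atomic_fetch_sub(&callbacks_in_flight, 1);
    }

    static void profiler_disable(void)
    {
            atomic_store(&profile_enabled, false);
            /* Stand-in for the stop_machine()/synchronize_sched() wait:
             * spin until no callback that may have seen the flag set is
             * still running, then tear down profiler state. */
            while (atomic_load(&callbacks_in_flight) != 0)
                    ;
    }
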
@@ -762,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
        .stat_show      = function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
@@ -780,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
                         * The files created are permanent, if something happens
                         * we still do not free memory.
                         */
-                       kfree(stat);
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
@@ -807,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
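
Two small fixes ride along here. ftrace_profile_debugfs() only runs
from boot-time setup, so it can be marked __init and discarded after
boot (the empty #else stub gets the same annotation to match). And the
kfree(stat) is dropped because stat points at a static per-cpu
variable, not a kmalloc'ed buffer, so freeing it on the error path
corrupted memory.
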
@@ -1220,6 +1225,13 @@ static void ftrace_shutdown(int command)
                return;
 
        ftrace_start_up--;
+       /*
+        * Just warn in case of imbalance; no need to kill ftrace, it's not
+        * critical, but the ftrace_call callers may never be nopped again
+        * after further ftrace uses.
+        */
+       WARN_ON_ONCE(ftrace_start_up < 0);
+
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
@@ -1406,10 +1418,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
+
+       if (!(iter->flags & FTRACE_ITER_HASH))
+               *pos = 0;
 
        iter->flags |= FTRACE_ITER_HASH;
 
-       return t_hash_next(m, p, pos);
+       iter->hidx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_hash_next(m, p, &l);
+               if (!p)
+                       break;
+       }
+       return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
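
This is the standard seq_file position fix: ->start must return the
element at *pos (seq_read may call it repeatedly for the same offset),
so rather than trusting leftover iterator state, t_hash_start resets the
cursor and replays ->next until the requested position is reached. The
idiom in isolation, as a compilable userspace sketch (my_start, my_next
and items are illustrative names):

    #include <stddef.h>

    typedef long long loff64;

    static int items[] = { 10, 20, 30 };

    /* Like a seq ->next: bump *pos and return the element there, or
     * NULL past the end. The sketch is driven by *pos alone. */
    static void *my_next(void *v, loff64 *pos)
    {
            loff64 idx = (*pos)++;

            (void)v;
            if (idx >= (loff64)(sizeof(items) / sizeof(items[0])))
                    return NULL;
            return &items[idx];
    }

    /* Like a seq ->start: replay ->next from zero up to *pos. */
    static void *my_start(loff64 *pos)
    {
            void *p = NULL;
            loff64 l;

            for (l = 0; l <= *pos; ) {
                    p = my_next(p, &l);
                    if (!p)
                            break;
            }
            return p;
    }
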
@@ -1456,8 +1478,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
-               } else {
-                       iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
@@ -1486,6 +1506,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
+       loff_t l;
 
        mutex_lock(&ftrace_lock);
        /*
@@ -1497,23 +1518,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
-               (*pos)++;
                return iter;
        }
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);
 
-       if (*pos > 0) {
-               if (iter->idx < 0)
-                       return p;
-               (*pos)--;
-               iter->idx--;
+       iter->pg = ftrace_pages_start;
+       iter->idx = 0;
+       for (l = 0; l <= *pos; ) {
+               p = t_next(m, p, &l);
+               if (!p)
+                       break;
        }
 
-       p = t_next(m, p, pos);
-
-       if (!p)
+       if (!p && iter->flags & FTRACE_ITER_FILTER)
                return t_hash_start(m, pos);
 
        return p;
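
t_start gets the same treatment: instead of trying to rewind *pos and
iter->idx by one, which broke as soon as seq_read restarted at an
arbitrary offset, it resets iter->pg to ftrace_pages_start and replays
t_next, just like the sketch above. The final test also gains an
FTRACE_ITER_FILTER check, so only the filter file falls through to the
probe-hash entries once the plain records are exhausted.
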
@@ -1643,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND))
+           (file->f_flags & O_TRUNC))
                ftrace_filter_reset(enable);
 
        if (file->f_mode & FMODE_READ) {
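
Keying the reset off O_TRUNC instead of "not O_APPEND" matches shell
semantics: "echo func > set_ftrace_filter" opens with O_TRUNC and
should clear the filter, while "echo func >> set_ftrace_filter"
appends. With the old test, a plain read-write open, which sets neither
flag, also wiped the filter as a side effect.
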
@@ -2489,32 +2508,31 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
        unsigned long *array = m->private;
-       int index = *pos;
-
-       (*pos)++;
 
-       if (index >= ftrace_graph_count)
+       if (*pos >= ftrace_graph_count)
                return NULL;
+       return &array[*pos];
+}
 
-       return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-       void *p = NULL;
-
        mutex_lock(&graph_lock);
 
        /* Nothing, tell g_show to print all functions are enabled */
        if (!ftrace_graph_count && !*pos)
                return (void *)1;
 
-       p = g_next(m, p, pos);
-
-       return p;
+       return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
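
The split into __g_next follows the seq_file convention: g_start
returns the element at *pos without touching the position, while g_next
advances first and reuses the same lookup. The old g_next both read and
incremented *pos, so calling it from g_start bumped the position as a
side effect and an entry was skipped whenever a read spanned multiple
buffers.
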
@@ -2559,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
-           !(file->f_flags & O_APPEND)) {
+           (file->f_flags & O_TRUNC)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }
@@ -2578,6 +2596,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+       if (file->f_mode & FMODE_READ)
+               seq_release(inode, file);
+       return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
        struct dyn_ftrace *rec;
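
ftrace_graph_open calls seq_open for readers, but the fops (updated in
the next hunk) had no release method, so the seq_file state leaked on
every close. The new ftrace_graph_release pairs the two, calling
seq_release only for FMODE_READ opens, since write-only opens never
allocated a seq_file.
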
@@ -2706,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
-       .open = ftrace_graph_open,
-       .read = seq_read,
-       .write = ftrace_graph_write,
+       .open           = ftrace_graph_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -3141,10 +3168,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-       if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+       if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;
 
-       last_ftrace_enabled = ftrace_enabled;
+       last_ftrace_enabled = !!ftrace_enabled;
 
        if (ftrace_enabled) {
 
@@ -3214,12 +3241,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
-                       t->curr_ret_stack = -1;
-                       /* Make sure IRQs see the -1 first: */
-                       barrier();
-                       t->ret_stack = ret_stack_list[start++];
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
+                       t->curr_ret_stack = -1;
+                       /* Make sure the tasks see the -1 first: */
+                       smp_wmb();
+                       t->ret_stack = ret_stack_list[start++];
                }
        } while_each_thread(g, t);
 
@@ -3277,8 +3304,10 @@ static int start_graph_tracing(void)
                return -ENOMEM;
 
        /* The cpu_boot init_task->ret_stack will never be freed */
-       for_each_online_cpu(cpu)
-               ftrace_graph_init_task(idle_task(cpu));
+       for_each_online_cpu(cpu) {
+               if (!idle_task(cpu)->ret_stack)
+                       ftrace_graph_init_task(idle_task(cpu));
+       }
 
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
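
The guard matters on a second register_ftrace_graph(): idle tasks keep
their ret_stack forever (per the comment above, it is never freed), so
re-running ftrace_graph_init_task on them would allocate a second stack
and leak the first.
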
@@ -3370,18 +3399,25 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
+       /* Make sure we do not use the parent ret_stack */
+       t->ret_stack = NULL;
+
        if (ftrace_graph_active) {
-               t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+               struct ftrace_ret_stack *ret_stack;
+
+               ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
-               if (!t->ret_stack)
+               if (!ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
                t->ftrace_timestamp = 0;
-       } else
-               t->ret_stack = NULL;
+               /* make curr_ret_stack visible before we add the ret_stack */
+               smp_wmb();
+               t->ret_stack = ret_stack;
+       }
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)
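
ftrace_graph_init_task runs on fork, so nulling ret_stack before
anything else keeps a child from briefly aliasing its parent's stack:
the pointer copied at fork is cleared, the new stack is built in a
local, and only after smp_wmb() is it published, the same
init-then-publish ordering used in alloc_retstack_tasklist above.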