diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e78628..2f32969 100644
@@ -47,8 +47,9 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* ftrace_pid_trace >= 0 will only trace threads with this pid */
-static int ftrace_pid_trace = -1;
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 /* Quick disabling of function tracer. */
 int function_trace_stop;
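
This hunk replaces the plain pid number with a reference-counted struct pid. A raw pid can be recycled as soon as the task exits, so holding the integer says nothing about the task still existing; a struct pid reference stays valid until the filter is cleared. The swapper (idle) tasks all report pid 0 and are not reachable through the pid hash, so the address of init_struct_pid serves as a sentinel meaning "trace the idle tasks".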
@@ -90,7 +91,7 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
 {
-       if (current->pid != ftrace_pid_trace)
+       if (!test_tsk_trace_trace(current))
                return;
 
        ftrace_pid_function(ip, parent_ip);
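
ftrace_pid_func() now tests a per-task flag rather than comparing current->pid, which keeps the hot-path check to a single load from the task struct and avoids any pid-number translation on every traced call. The set/clear/test helpers come from the matching include/linux/sched.h change; a minimal sketch of that scheme, assuming an unsigned long flag word named trace in struct task_struct:

	/* Sketch of the per-task trace-flag helpers; the bit layout here
	 * is an assumption, the names match the calls in this patch. */
	#define TSK_TRACE_FL_TRACE_BIT	0
	#define TSK_TRACE_FL_TRACE	(1 << TSK_TRACE_FL_TRACE_BIT)

	static inline void set_tsk_trace_trace(struct task_struct *tsk)
	{
		set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
	}

	static inline void clear_tsk_trace_trace(struct task_struct *tsk)
	{
		clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
	}

	static inline int test_tsk_trace_trace(struct task_struct *tsk)
	{
		return tsk->trace & TSK_TRACE_FL_TRACE;
	}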
@@ -153,7 +154,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
                else
                        func = ftrace_list_func;
 
-               if (ftrace_pid_trace >= 0) {
+               if (ftrace_pid_trace) {
                        set_ftrace_pid_function(func);
                        func = ftrace_pid_func;
                }
@@ -209,7 +210,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                if (ftrace_list->next == &ftrace_list_end) {
                        ftrace_func_t func = ftrace_list->func;
 
-                       if (ftrace_pid_trace >= 0) {
+                       if (ftrace_pid_trace) {
                                set_ftrace_pid_function(func);
                                func = ftrace_pid_func;
                        }
@@ -239,7 +240,7 @@ static void ftrace_update_pid_func(void)
 
        func = ftrace_trace_function;
 
-       if (ftrace_pid_trace >= 0) {
+       if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        } else {
@@ -1046,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;
+       int not = 0;
+
+       if (buff[0] == '!') {
+               not = 1;
+               buff++;
+               len--;
+       }
 
        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
@@ -1099,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
                                        matched = 1;
                                break;
                        }
-                       if (matched)
-                               rec->flags |= flag;
+                       if (matched) {
+                               if (not)
+                                       rec->flags &= ~flag;
+                               else
+                                       rec->flags |= flag;
+                       }
                }
                pg = pg->next;
        }
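
The leading-'!' handling gives the filter files a negation syntax: a record matching a '!'-prefixed pattern has its flag cleared instead of set. For example, echo 'sched_*' > set_ftrace_filter followed by echo '!sched_fork' >> set_ftrace_filter selects every sched_* function except sched_fork; the same form works for set_ftrace_notrace. Note that appending with >> matters, since opening the file for write without O_APPEND resets the list.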
@@ -1320,6 +1332,230 @@ static struct file_operations ftrace_notrace_fops = {
        .release = ftrace_notrace_release,
 };
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static DEFINE_MUTEX(graph_lock);
+
+int ftrace_graph_count;
+unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       unsigned long *array = m->private;
+       int index = *pos;
+
+       (*pos)++;
+
+       if (index >= ftrace_graph_count)
+               return NULL;
+
+       return &array[index];
+}
+
+static void *g_start(struct seq_file *m, loff_t *pos)
+{
+       void *p = NULL;
+
+       mutex_lock(&graph_lock);
+
+       p = g_next(m, p, pos);
+
+       return p;
+}
+
+static void g_stop(struct seq_file *m, void *p)
+{
+       mutex_unlock(&graph_lock);
+}
+
+static int g_show(struct seq_file *m, void *v)
+{
+       unsigned long *ptr = v;
+       char str[KSYM_SYMBOL_LEN];
+
+       if (!ptr)
+               return 0;
+
+       kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+
+       seq_printf(m, "%s\n", str);
+
+       return 0;
+}
+
+static struct seq_operations ftrace_graph_seq_ops = {
+       .start = g_start,
+       .next = g_next,
+       .stop = g_stop,
+       .show = g_show,
+};
+
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+       int ret = 0;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       mutex_lock(&graph_lock);
+       if ((file->f_mode & FMODE_WRITE) &&
+           !(file->f_flags & O_APPEND)) {
+               ftrace_graph_count = 0;
+               memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+       }
+
+       if (file->f_mode & FMODE_READ) {
+               ret = seq_open(file, &ftrace_graph_seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = ftrace_graph_funcs;
+               }
+       } else
+               file->private_data = ftrace_graph_funcs;
+       mutex_unlock(&graph_lock);
+
+       return ret;
+}
+
+static ssize_t
+ftrace_graph_read(struct file *file, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       if (file->f_mode & FMODE_READ)
+               return seq_read(file, ubuf, cnt, ppos);
+       else
+               return -EPERM;
+}
+
+static int
+ftrace_set_func(unsigned long *array, int idx, char *buffer)
+{
+       char str[KSYM_SYMBOL_LEN];
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+       int found = 0;
+       int i, j;
+
+       if (ftrace_disabled)
+               return -ENODEV;
+
+       /* should not be called from interrupt context */
+       spin_lock(&ftrace_lock);
+
+       for (pg = ftrace_pages_start; pg; pg = pg->next) {
+               for (i = 0; i < pg->index; i++) {
+                       rec = &pg->records[i];
+
+                       if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+                               continue;
+
+                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+                       if (strcmp(str, buffer) == 0) {
+                               found = 1;
+                               for (j = 0; j < idx; j++)
+                                       if (array[j] == rec->ip) {
+                                               found = 0;
+                                               break;
+                                       }
+                               if (found)
+                                       array[idx] = rec->ip;
+                               break;
+                       }
+               }
+       }
+       spin_unlock(&ftrace_lock);
+
+       return found ? 0 : -EINVAL;
+}
+
+static ssize_t
+ftrace_graph_write(struct file *file, const char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
+{
+       unsigned char buffer[FTRACE_BUFF_MAX+1];
+       unsigned long *array;
+       size_t read = 0;
+       ssize_t ret;
+       int index = 0;
+       char ch;
+
+       if (!cnt)
+               return 0;
+
+       mutex_lock(&graph_lock);
+
+       if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (file->f_mode & FMODE_READ) {
+               struct seq_file *m = file->private_data;
+               array = m->private;
+       } else
+               array = file->private_data;
+
+       ret = get_user(ch, ubuf++);
+       if (ret)
+               goto out;
+       read++;
+       cnt--;
+
+       /* skip white space */
+       while (cnt && isspace(ch)) {
+               ret = get_user(ch, ubuf++);
+               if (ret)
+                       goto out;
+               read++;
+               cnt--;
+       }
+
+       if (isspace(ch)) {
+               *ppos += read;
+               ret = read;
+               goto out;
+       }
+
+       while (cnt && !isspace(ch)) {
+               if (index < FTRACE_BUFF_MAX)
+                       buffer[index++] = ch;
+               else {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = get_user(ch, ubuf++);
+               if (ret)
+                       goto out;
+               read++;
+               cnt--;
+       }
+       buffer[index] = 0;
+
+       /* we allow only one at a time */
+       ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+       if (ret)
+               goto out;
+
+       ftrace_graph_count++;
+
+       *ppos += read;
+
+       ret = read;
+ out:
+       mutex_unlock(&graph_lock);
+
+       return ret;
+}
+
+static const struct file_operations ftrace_graph_fops = {
+       .open = ftrace_graph_open,
+       .read = ftrace_graph_read,
+       .write = ftrace_graph_write,
+};
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
        struct dentry *entry;
@@ -1347,6 +1583,15 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
+                                   NULL,
+                                   &ftrace_graph_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'set_graph_function' entry\n");
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
        return 0;
 }
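
With the debugfs entry in place, set_graph_function accepts one function name per write: ftrace_graph_write() copies a whitespace-delimited token, ftrace_set_func() resolves it to an instruction address via kallsyms (rejecting duplicates), and the address lands in ftrace_graph_funcs[]. Reading the file walks that array through the seq_file iterator above. In use (assuming debugfs mounted under /sys/kernel/debug/tracing): echo do_IRQ > set_graph_function restricts the graph tracer to do_IRQ and everything it calls, and appending with >> adds further functions without clearing the list, up to FTRACE_GRAPH_MAX_FUNCS entries.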
 
@@ -1451,18 +1696,84 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
        char buf[64];
        int r;
 
-       if (ftrace_pid_trace >= 0)
-               r = sprintf(buf, "%u\n", ftrace_pid_trace);
+       if (ftrace_pid_trace == ftrace_swapper_pid)
+               r = sprintf(buf, "swapper tasks\n");
+       else if (ftrace_pid_trace)
+               r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
        else
                r = sprintf(buf, "no pid\n");
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
+static void clear_ftrace_swapper(void)
+{
+       struct task_struct *p;
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               p = idle_task(cpu);
+               clear_tsk_trace_trace(p);
+       }
+       put_online_cpus();
+}
+
+static void set_ftrace_swapper(void)
+{
+       struct task_struct *p;
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               p = idle_task(cpu);
+               set_tsk_trace_trace(p);
+       }
+       put_online_cpus();
+}
+
+static void clear_ftrace_pid(struct pid *pid)
+{
+       struct task_struct *p;
+
+       do_each_pid_task(pid, PIDTYPE_PID, p) {
+               clear_tsk_trace_trace(p);
+       } while_each_pid_task(pid, PIDTYPE_PID, p);
+       put_pid(pid);
+}
+
+static void set_ftrace_pid(struct pid *pid)
+{
+       struct task_struct *p;
+
+       do_each_pid_task(pid, PIDTYPE_PID, p) {
+               set_tsk_trace_trace(p);
+       } while_each_pid_task(pid, PIDTYPE_PID, p);
+}
+
+static void clear_ftrace_pid_task(struct pid **pid)
+{
+       if (*pid == ftrace_swapper_pid)
+               clear_ftrace_swapper();
+       else
+               clear_ftrace_pid(*pid);
+
+       *pid = NULL;
+}
+
+static void set_ftrace_pid_task(struct pid *pid)
+{
+       if (pid == ftrace_swapper_pid)
+               set_ftrace_swapper();
+       else
+               set_ftrace_pid(pid);
+}
+
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
+       struct pid *pid;
        char buf[64];
        long val;
        int ret;
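
These helpers flag the right set of tasks for each filter shape: for an ordinary pid, do_each_pid_task() walks the tasks attached to the struct pid and sets or clears their trace flag; for the swapper sentinel there is no struct pid to walk, so the code takes get_online_cpus() and flags each CPU's idle task directly, which also keeps CPUs from appearing or vanishing mid-update. clear_ftrace_pid() additionally drops the struct pid reference that was taken when the filter was installed.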
@@ -1480,18 +1791,37 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
                return ret;
 
        mutex_lock(&ftrace_start_lock);
-       if (ret < 0) {
+       if (val < 0) {
                /* disable pid tracing */
-               if (ftrace_pid_trace < 0)
+               if (!ftrace_pid_trace)
                        goto out;
-               ftrace_pid_trace = -1;
+
+               clear_ftrace_pid_task(&ftrace_pid_trace);
 
        } else {
+               /* swapper task is special */
+               if (!val) {
+                       pid = ftrace_swapper_pid;
+                       if (pid == ftrace_pid_trace)
+                               goto out;
+               } else {
+                       pid = find_get_pid(val);
 
-               if (ftrace_pid_trace == val)
+                       if (pid == ftrace_pid_trace) {
+                               put_pid(pid);
+                               goto out;
+                       }
+               }
+
+               if (ftrace_pid_trace)
+                       clear_ftrace_pid_task(&ftrace_pid_trace);
+
+               if (!pid)
                        goto out;
 
-               ftrace_pid_trace = val;
+               ftrace_pid_trace = pid;
+
+               set_ftrace_pid_task(ftrace_pid_trace);
        }
 
        /* update the function call */
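
The write handler maps the user's number onto three cases: a negative value clears any active filter, 0 selects the swapper sentinel (pid 0 cannot be found with find_get_pid()), and a positive value is resolved with find_get_pid(), which takes the reference that clear_ftrace_pid() later puts. So echo 42 > set_ftrace_pid traces only pid 42, echo 0 > set_ftrace_pid traces the idle tasks, and echo -1 > set_ftrace_pid disables pid filtering.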
@@ -1636,11 +1966,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 static atomic_t ftrace_graph_active;
 
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+       return 0;
+}
+
 /* The callbacks that hook a function */
 trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry =
-                       (trace_func_graph_ent_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
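
The ftrace_graph_entry_stub change above closes a real type hole: ftrace_stub is a void function, and calling it through trace_func_graph_ent_t means the caller reads back whatever happens to be in the return register, while callers genuinely branch on that value. The dedicated stub returns 0, the "do not hook this function's return" answer. A sketch of the consuming side, modeled loosely on the x86 prepare_ftrace_return() path (the variable names here are assumptions):

	/* If the entry callback declines, undo the return-address hook. */
	trace.func = self_addr;
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old_return_address;
	}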
@@ -1675,6 +2009,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                        /* Make sure IRQs see the -1 first: */
                        barrier();
                        t->ret_stack = ret_stack_list[start++];
+                       atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                }
        } while_each_thread(g, t);
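
alloc_retstack_tasklist() now also zeroes tracing_graph_pause, a new per-task atomic counter: while it is nonzero, the graph tracer skips hooking that task, so code that must not recurse into the tracer can pause it. The companion header change presumably pairs the counter with inline helpers along these lines (a sketch, assuming they live in include/linux/ftrace.h):

	static inline void pause_graph_tracing(void)
	{
		atomic_inc(&current->tracing_graph_pause);
	}

	static inline void unpause_graph_tracing(void)
	{
		atomic_dec(&current->tracing_graph_pause);
	}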
@@ -1738,7 +2073,7 @@ void unregister_ftrace_graph(void)
 
        atomic_dec(&ftrace_graph_active);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-       ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+       ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
        mutex_unlock(&ftrace_sysctl_lock);
@@ -1754,6 +2089,7 @@ void ftrace_graph_init_task(struct task_struct *t)
                if (!t->ret_stack)
                        return;
                t->curr_ret_stack = -1;
+               atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
        } else
                t->ret_stack = NULL;
@@ -1769,5 +2105,10 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
        kfree(ret_stack);
 }
+
+void ftrace_graph_stop(void)
+{
+       ftrace_stop();
+}
 #endif
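
ftrace_graph_stop() gives the return-path handlers and arch code a single kill switch: a caller that detects a fatal inconsistency, such as a corrupted return stack, can shut all tracing down through ftrace_stop() rather than let a broken return trampoline take the machine down.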