tracing: Do not record user stack trace from NMI context
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31118ae..e52683f 100644
@@ -12,7 +12,7 @@
  *  Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -32,6 +32,7 @@
 #include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/string.h>
+#include <linux/rwsem.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -102,9 +103,6 @@ static inline void ftrace_enable_cpu(void)
 
 static cpumask_var_t __read_mostly     tracing_buffer_mask;
 
-/* Define which cpu buffers are currently read in trace_pipe */
-static cpumask_var_t                   tracing_reader_cpumask;
-
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu(cpu, tracing_buffer_mask)
 
@@ -243,12 +241,91 @@ static struct tracer              *current_trace __read_mostly;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
- * This lock is also used to keep user access serialized.
- * Accesses from userspace will grab this lock while userspace
- * activities happen inside the kernel.
  */
 static DEFINE_MUTEX(trace_types_lock);
 
+/*
+ * Serialize access to the ring buffer.
+ *
+ * The ring buffer serializes readers, but that is only low-level protection.
+ * The validity of the events (returned by ring_buffer_peek() etc.)
+ * is not protected by the ring buffer.
+ *
+ * The content of events may become garbage if we let another process consume
+ * these events concurrently:
+ *   A) the page holding the consumed events may become a normal page
+ *      (not a reader page) in the ring buffer and be rewritten
+ *      by the event producer.
+ *   B) the page holding the consumed events may become a page for
+ *      splice_read, and be returned to the system.
+ *
+ * These primitives allow processes to access different per-cpu ring buffers
+ * concurrently.
+ *
+ * These primitives don't distinguish read-only and read-consume access.
+ * Multiple read-only accesses are also serialized.
+ */
+
+#ifdef CONFIG_SMP
+static DECLARE_RWSEM(all_cpu_access_lock);
+static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+       if (cpu == TRACE_PIPE_ALL_CPU) {
+               /* Take it for access to the whole ring buffer. */
+               down_write(&all_cpu_access_lock);
+       } else {
+               /* Take it for access to a single cpu ring buffer. */
+
+               /* First, block other trace_access_lock(TRACE_PIPE_ALL_CPU) callers. */
+               down_read(&all_cpu_access_lock);
+
+               /* Second, block other access to this @cpu ring buffer. */
+               mutex_lock(&per_cpu(cpu_access_lock, cpu));
+       }
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+       if (cpu == TRACE_PIPE_ALL_CPU) {
+               up_write(&all_cpu_access_lock);
+       } else {
+               mutex_unlock(&per_cpu(cpu_access_lock, cpu));
+               up_read(&all_cpu_access_lock);
+       }
+}
+
+static inline void trace_access_lock_init(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               mutex_init(&per_cpu(cpu_access_lock, cpu));
+}
+
+#else
+
+static DEFINE_MUTEX(access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+       (void)cpu;
+       mutex_lock(&access_lock);
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+       (void)cpu;
+       mutex_unlock(&access_lock);
+}
+
+static inline void trace_access_lock_init(void)
+{
+}
+
+#endif
+
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
@@ -297,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+       unsigned long threshold;
+       int ret;
+
+       if (!str)
+               return 0;
+       ret = strict_strtoul(str, 0, &threshold);
+       if (ret < 0)
+               return 0;
+       tracing_thresh = threshold * 1000;
+       return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
        return nsecs / 1000;
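
The new tracing_thresh= boot parameter takes a threshold in microseconds and stores it in nanoseconds (hence the * 1000); booting with tracing_thresh=100 thus sets a 100 us threshold. The same value can also be set at runtime through the tracing_thresh debugfs file created further down in this patch, which, like tracing_max_latency, is read and written in microseconds. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug:

	#include <stdio.h>

	int main(void)
	{
		/* 100 microseconds; the kernel stores this as 100000 ns. */
		FILE *f = fopen("/sys/kernel/debug/tracing/tracing_thresh", "w");

		if (!f)
			return 1;
		fprintf(f, "100\n");
		return fclose(f) ? 1 : 0;
	}
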
@@ -313,7 +405,6 @@ static const char *trace_options[] = {
        "bin",
        "block",
        "stacktrace",
-       "sched-tree",
        "trace_printk",
        "ftrace_preempt",
        "branch",
@@ -503,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly    tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly    tracing_max_latency;
-unsigned long __read_mostly    tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -516,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
        struct trace_array_cpu *data = tr->data[cpu];
-       struct trace_array_cpu *max_data = tr->data[cpu];
+       struct trace_array_cpu *max_data;
 
        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;
@@ -526,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;
 
-       memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+       memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        max_data->uid = task_uid(tsk);
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
@@ -748,10 +840,10 @@ out:
        mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
        ftrace_disable_cpu();
-       ring_buffer_reset_cpu(tr->buffer, cpu);
+       ring_buffer_reset_cpu(buffer, cpu);
        ftrace_enable_cpu();
 }
 
@@ -763,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
        /* Make sure all commits have finished */
        synchronize_sched();
-       __tracing_reset(tr, cpu);
+       __tracing_reset(buffer, cpu);
 
        ring_buffer_record_enable(buffer);
 }
@@ -781,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
        tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
-               __tracing_reset(tr, cpu);
+               __tracing_reset(buffer, cpu);
 
        ring_buffer_record_enable(buffer);
 }
@@ -858,6 +950,8 @@ void tracing_start(void)
                goto out;
        }
 
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
 
        buffer = global_trace.buffer;
        if (buffer)
@@ -867,6 +961,8 @@ void tracing_start(void)
        if (buffer)
                ring_buffer_record_enable(buffer);
 
+       arch_spin_unlock(&ftrace_max_lock);
+
        ftrace_start();
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -888,6 +984,9 @@ void tracing_stop(void)
        if (trace_stop_count++)
                goto out;
 
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
+
        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
@@ -896,6 +995,8 @@ void tracing_stop(void)
        if (buffer)
                ring_buffer_record_disable(buffer);
 
+       arch_spin_unlock(&ftrace_max_lock);
+
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
@@ -952,6 +1053,11 @@ void trace_find_cmdline(int pid, char comm[])
                return;
        }
 
+       if (WARN_ON_ONCE(pid < 0)) {
+               strcpy(comm, "<XXX>");
+               return;
+       }
+
        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
@@ -1151,6 +1257,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
        __ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+       unsigned long flags;
+
+       if (tracing_disabled || tracing_selftest_running)
+               return;
+
+       local_save_flags(flags);
+
+       /* Skipping 3 entries seems to get us to the caller of this function. */
+       __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
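
The trace_dump_stack() helper added above records a kernel backtrace into the ftrace ring buffer instead of printing it to the console like dump_stack(). A hypothetical call site (the function name is made up for illustration; this assumes the matching declaration added elsewhere in this series is in scope):

	/* Somewhere in code being debugged: leave a backtrace in the
	 * trace buffer, readable later via the debugfs trace file. */
	static void example_suspect_path(void)
	{
		trace_dump_stack();
	}
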
@@ -1162,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
+       /*
+        * NMIs cannot handle page faults, even with fixups.
+        * Saving the user stack can (and often does) fault.
+        */
+       if (unlikely(in_nmi()))
+               return;
+
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
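
The in_nmi() check added above is the fix named in the commit title: saving the user stack walks user memory, which can fault, and NMI context cannot handle page faults even through the fixup path. The same guard applies to any NMI-reachable code that touches user memory; a generic sketch, with a hypothetical helper built on standard uaccess calls:

	#include <linux/hardirq.h>
	#include <linux/uaccess.h>

	/* Hypothetical helper: refuse to touch user memory from NMI
	 * context, where even a fixed-up fault cannot be taken safely. */
	static int example_peek_user(const unsigned long __user *uaddr,
				     unsigned long *val)
	{
		int ret;

		if (in_nmi())
			return -EINVAL;

		pagefault_disable();
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}
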
@@ -1300,8 +1429,10 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        entry->fmt                      = fmt;
 
        memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
+               ftrace_trace_stack(buffer, flags, 6, pc);
+       }
 
 out_unlock:
        arch_spin_unlock(&trace_buf_lock);
@@ -1374,8 +1505,10 @@ int trace_array_vprintk(struct trace_array *tr,
 
        memcpy(&entry->buf, trace_buf, len);
        entry->buf[len] = '\0';
-       if (!filter_check_discard(call, entry, buffer, event))
+       if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
+               ftrace_trace_stack(buffer, irq_flags, 6, pc);
+       }
 
  out_unlock:
        arch_spin_unlock(&trace_buf_lock);
@@ -1565,12 +1698,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 }
 
 /*
- * No necessary locking here. The worst thing which can
- * happen is loosing events consumed at the same time
- * by a trace_pipe reader.
- * Other than that, we don't risk to crash the ring buffer
- * because it serializes the readers.
- *
  * The current tracer is copied to avoid a global locking
  * all around.
  */
@@ -1608,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
                ftrace_enable_cpu();
 
+               iter->leftover = 0;
                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;
 
@@ -1625,12 +1753,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
        }
 
        trace_event_read_lock();
+       trace_access_lock(cpu_file);
        return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
+       struct trace_iterator *iter = m->private;
+
        atomic_dec(&trace_record_cmdline_disabled);
+       trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 }
 
@@ -2316,67 +2448,49 @@ static const struct file_operations tracing_cpumask_fops = {
        .write          = tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
        struct tracer_opt *trace_opts;
        u32 tracer_flags;
-       int len = 0;
-       char *buf;
-       int r = 0;
        int i;
 
-
-       /* calculate max size */
-       for (i = 0; trace_options[i]; i++) {
-               len += strlen(trace_options[i]);
-               len += 3; /* "no" and newline */
-       }
-
        mutex_lock(&trace_types_lock);
        tracer_flags = current_trace->flags->val;
        trace_opts = current_trace->flags->opts;
 
-       /*
-        * Increase the size with names of options specific
-        * of the current tracer.
-        */
-       for (i = 0; trace_opts[i].name; i++) {
-               len += strlen(trace_opts[i].name);
-               len += 3; /* "no" and newline */
-       }
-
-       /* +1 for \0 */
-       buf = kmalloc(len + 1, GFP_KERNEL);
-       if (!buf) {
-               mutex_unlock(&trace_types_lock);
-               return -ENOMEM;
-       }
-
        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
-                       r += sprintf(buf + r, "%s\n", trace_options[i]);
+                       seq_printf(m, "%s\n", trace_options[i]);
                else
-                       r += sprintf(buf + r, "no%s\n", trace_options[i]);
+                       seq_printf(m, "no%s\n", trace_options[i]);
        }
 
        for (i = 0; trace_opts[i].name; i++) {
                if (tracer_flags & trace_opts[i].bit)
-                       r += sprintf(buf + r, "%s\n",
-                               trace_opts[i].name);
+                       seq_printf(m, "%s\n", trace_opts[i].name);
                else
-                       r += sprintf(buf + r, "no%s\n",
-                               trace_opts[i].name);
+                       seq_printf(m, "no%s\n", trace_opts[i].name);
        }
        mutex_unlock(&trace_types_lock);
 
-       WARN_ON(r >= len + 1);
+       return 0;
+}
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+                              struct tracer_flags *tracer_flags,
+                              struct tracer_opt *opts, int neg)
+{
+       int ret;
 
-       kfree(buf);
-       return r;
+       ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+       if (ret)
+               return ret;
+
+       if (neg)
+               tracer_flags->val &= ~opts->bit;
+       else
+               tracer_flags->val |= opts->bit;
+       return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2384,33 +2498,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
        struct tracer_flags *tracer_flags = trace->flags;
        struct tracer_opt *opts = NULL;
-       int ret = 0, i = 0;
-       int len;
+       int i;
 
        for (i = 0; tracer_flags->opts[i].name; i++) {
                opts = &tracer_flags->opts[i];
-               len = strlen(opts->name);
 
-               if (strncmp(cmp, opts->name, len) == 0) {
-                       ret = trace->set_flag(tracer_flags->val,
-                               opts->bit, !neg);
-                       break;
-               }
+               if (strcmp(cmp, opts->name) == 0)
+                       return __set_tracer_option(trace, trace->flags,
+                                                  opts, neg);
        }
-       /* Not found */
-       if (!tracer_flags->opts[i].name)
-               return -EINVAL;
-
-       /* Refused to handle */
-       if (ret)
-               return ret;
-
-       if (neg)
-               tracer_flags->val &= ~opts->bit;
-       else
-               tracer_flags->val |= opts->bit;
 
-       return 0;
+       return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2430,7 +2528,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
 {
        char buf[64];
-       char *cmp = buf;
+       char *cmp;
        int neg = 0;
        int ret;
        int i;
@@ -2442,16 +2540,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                return -EFAULT;
 
        buf[cnt] = 0;
+       cmp = strstrip(buf);
 
-       if (strncmp(buf, "no", 2) == 0) {
+       if (strncmp(cmp, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }
 
        for (i = 0; trace_options[i]; i++) {
-               int len = strlen(trace_options[i]);
-
-               if (strncmp(cmp, trace_options[i], len) == 0) {
+               if (strcmp(cmp, trace_options[i]) == 0) {
                        set_tracer_flags(1 << i, !neg);
                        break;
                }
@@ -2471,9 +2568,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+       if (tracing_disabled)
+               return -ENODEV;
+       return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-       .open           = tracing_open_generic,
-       .read           = tracing_trace_options_read,
+       .open           = tracing_trace_options_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
        .write          = tracing_trace_options_write,
 };
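
The trace_options read path (and, below, the trace_clock one) is converted from a hand-sized kmalloc buffer to the stock seq_file single_open() pattern, which handles buffer sizing, partial reads and llseek for the caller. The minimal shape of that pattern, with hypothetical names (needs <linux/seq_file.h> and <linux/fs.h>):

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "example\n");	/* seq_file grows its buffer as needed */
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_fops = {
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
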
 
@@ -2847,22 +2953,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 
        mutex_lock(&trace_types_lock);
 
-       /* We only allow one reader per cpu */
-       if (cpu_file == TRACE_PIPE_ALL_CPU) {
-               if (!cpumask_empty(tracing_reader_cpumask)) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-               cpumask_setall(tracing_reader_cpumask);
-       } else {
-               if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
-                       cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
-               else {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       }
-
        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
@@ -2918,12 +3008,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
        mutex_lock(&trace_types_lock);
 
-       if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
-               cpumask_clear(tracing_reader_cpumask);
-       else
-               cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
-
-
        if (iter->trace->pipe_close)
                iter->trace->pipe_close(iter);
 
@@ -3085,6 +3169,7 @@ waitagain:
        iter->pos = -1;
 
        trace_event_read_lock();
+       trace_access_lock(iter->cpu_file);
        while (find_next_entry_inc(iter) != NULL) {
                enum print_line_t ret;
                int len = iter->seq.len;
@@ -3101,6 +3186,7 @@ waitagain:
                if (iter->seq.len >= cnt)
                        break;
        }
+       trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 
        /* Now copy what we have to the user */
@@ -3226,6 +3312,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
        }
 
        trace_event_read_lock();
+       trace_access_lock(iter->cpu_file);
 
        /* Fill as many pages as possible. */
        for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
@@ -3249,6 +3336,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                trace_seq_init(&iter->seq);
        }
 
+       trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
        mutex_unlock(&iter->mutex);
 
@@ -3392,21 +3480,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-                                 size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-       char buf[64];
-       int bufiter = 0;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-               bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+               seq_printf(m,
                        "%s%s%s%s", i ? " " : "",
                        i == trace_clock_id ? "[" : "", trace_clocks[i].name,
                        i == trace_clock_id ? "]" : "");
-       bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+       seq_putc(m, '\n');
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+       return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3448,6 +3533,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+       if (tracing_disabled)
+               return -ENODEV;
+       return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@ -3486,8 +3578,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-       .open           = tracing_open_generic,
-       .read           = tracing_clock_read,
+       .open           = tracing_clock_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
        .write          = tracing_clock_write,
 };
 
@@ -3544,10 +3638,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
        info->read = 0;
 
+       trace_access_lock(info->cpu);
        ret = ring_buffer_read_page(info->tr->buffer,
                                    &info->spare,
                                    count,
                                    info->cpu, 0);
+       trace_access_unlock(info->cpu);
        if (ret < 0)
                return 0;
 
@@ -3675,6 +3771,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                len &= PAGE_MASK;
        }
 
+       trace_access_lock(info->cpu);
        entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
        for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
@@ -3722,6 +3819,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
        }
 
+       trace_access_unlock(info->cpu);
        spd.nr_pages = i;
 
        /* did we read anything? */
@@ -3948,39 +4046,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
        if (ret < 0)
                return ret;
 
-       ret = 0;
-       switch (val) {
-       case 0:
-               /* do nothing if already cleared */
-               if (!(topt->flags->val & topt->opt->bit))
-                       break;
-
-               mutex_lock(&trace_types_lock);
-               if (current_trace->set_flag)
-                       ret = current_trace->set_flag(topt->flags->val,
-                                                     topt->opt->bit, 0);
-               mutex_unlock(&trace_types_lock);
-               if (ret)
-                       return ret;
-               topt->flags->val &= ~topt->opt->bit;
-               break;
-       case 1:
-               /* do nothing if already set */
-               if (topt->flags->val & topt->opt->bit)
-                       break;
+       if (val != 0 && val != 1)
+               return -EINVAL;
 
+       if (!!(topt->flags->val & topt->opt->bit) != val) {
                mutex_lock(&trace_types_lock);
-               if (current_trace->set_flag)
-                       ret = current_trace->set_flag(topt->flags->val,
-                                                     topt->opt->bit, 1);
+               ret = __set_tracer_option(current_trace, topt->flags,
+                                         topt->opt, !val);
                mutex_unlock(&trace_types_lock);
                if (ret)
                        return ret;
-               topt->flags->val |= topt->opt->bit;
-               break;
-
-       default:
-               return -EINVAL;
        }
 
        *ppos += cnt;
@@ -4181,6 +4256,8 @@ static __init int tracer_init_debugfs(void)
        struct dentry *d_tracer;
        int cpu;
 
+       trace_access_lock_init();
+
        d_tracer = tracing_init_dentry();
 
        trace_create_file("tracing_enabled", 0644, d_tracer,
@@ -4204,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
        trace_create_file("tracing_max_latency", 0644, d_tracer,
                        &tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
        trace_create_file("tracing_thresh", 0644, d_tracer,
                        &tracing_thresh, &tracing_max_lat_fops);
-#endif
 
        trace_create_file("README", 0444, d_tracer,
                        NULL, &tracing_readme_fops);
@@ -4415,9 +4492,6 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
 
-       if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
-               goto out_free_tracing_cpumask;
-
        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
@@ -4475,8 +4549,6 @@ __init static int tracer_alloc_buffers(void)
        return 0;
 
 out_free_cpumask:
-       free_cpumask_var(tracing_reader_cpumask);
-out_free_tracing_cpumask:
        free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);