trace: stop all recording to ring buffer on ftrace_dump
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 216bbe7..4d89e84 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
 unsigned long __read_mostly    tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly    tracing_thresh;
 
+/*
+ * We need to change this state when a selftest is running.
+ * A selftest will look into the ring buffer to count the
+ * entries inserted during the selftest, but concurrent
+ * insertions into the ring buffer, such as ftrace_printk, could occur
+ * at the same time and give false positive or negative results.
+ */
+static bool __read_mostly tracing_selftest_running;
+
+/* For tracers that don't implement custom flags */
+static struct tracer_opt dummy_tracer_opt[] = {
+       { }
+};
+
+static struct tracer_flags dummy_tracer_flags = {
+       .val = 0,
+       .opts = dummy_tracer_opt
+};
+
+static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+{
+       return 0;
+}
 
 /*
  * Kill all tracing for good (never come back).
@@ -66,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
        preempt_enable();
 }
 
-static cpumask_t __read_mostly         tracing_buffer_mask;
+static cpumask_var_t __read_mostly     tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)      \
-       for_each_cpu_mask(cpu, tracing_buffer_mask)
+       for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -204,8 +227,9 @@ static DEFINE_MUTEX(trace_types_lock);
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
-/* trace_flags holds iter_ctrl options */
-unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK;
+/* trace_flags holds trace_options default values */
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
+       TRACE_ITER_ANNOTATE;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -244,13 +268,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
        return nsecs / 1000;
 }
 
-/*
- * TRACE_ITER_SYM_MASK masks the options in trace_flags that
- * control the output of kernel symbols.
- */
-#define TRACE_ITER_SYM_MASK \
-       (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
-
 /* These must match the bit positions in trace_iterator_flags */
 static const char *trace_options[] = {
        "print-parent",
@@ -265,6 +282,11 @@ static const char *trace_options[] = {
        "sched-tree",
        "ftrace_printk",
        "ftrace_preempt",
+       "branch",
+       "annotate",
+       "userstacktrace",
+       "sym-userobj",
+       "printk-msg-only",
        NULL
 };
 
@@ -298,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
-       data->uid = tsk->uid;
+       data->uid = task_uid(tsk);
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;
@@ -411,6 +433,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
        return trace_seq_putmem(s, hex, j);
 }
 
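+/*
+ * Append the pathname of @path to the trace sequence, with any
+ * newlines escaped; a '?' is emitted if the path cannot be resolved.
+ * Returns 0 when there is no room left in the sequence.
+ */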
+static int
+trace_seq_path(struct trace_seq *s, struct path *path)
+{
+       unsigned char *p;
+
+       if (s->len >= (PAGE_SIZE - 1))
+               return 0;
+       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+       if (!IS_ERR(p)) {
+               p = mangle_path(s->buffer + s->len, p, "\n");
+               if (p) {
+                       s->len = p - s->buffer;
+                       return 1;
+               }
+       } else {
+               s->buffer[s->len++] = '?';
+               return 1;
+       }
+
+       return 0;
+}
+
 static void
 trace_seq_reset(struct trace_seq *s)
 {
@@ -522,7 +566,17 @@ int register_tracer(struct tracer *type)
                return -1;
        }
 
+       /*
+        * When this gets called we hold the BKL which means that
+        * preemption is disabled. Various trace selftests however
+        * need to disable and enable preemption for successful tests.
+        * So we drop the BKL here and grab it after the tests again.
+        */
+       unlock_kernel();
        mutex_lock(&trace_types_lock);
+
+       tracing_selftest_running = true;
+
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
@@ -533,11 +587,20 @@ int register_tracer(struct tracer *type)
                }
        }
 
+       if (!type->set_flag)
+               type->set_flag = &dummy_set_flag;
+       if (!type->flags)
+               type->flags = &dummy_tracer_flags;
+       else
+               if (!type->flags->opts)
+                       type->flags->opts = dummy_tracer_opt;
+
 #ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
                int i;
+
                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
@@ -545,9 +608,9 @@ int register_tracer(struct tracer *type)
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
-               for_each_tracing_cpu(i) {
+               for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
-               }
+
                current_trace = type;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
@@ -559,9 +622,9 @@ int register_tracer(struct tracer *type)
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
-               for_each_tracing_cpu(i) {
+               for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
-               }
+
                printk(KERN_CONT "PASSED\n");
        }
 #endif
@@ -573,7 +636,9 @@ int register_tracer(struct tracer *type)
                max_tracer_type_len = len;
 
  out:
+       tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);
+       lock_kernel();
 
        return ret;
 }
@@ -613,6 +678,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
        ftrace_enable_cpu();
 }
 
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+       int cpu;
+
+       tr->time_start = ftrace_now(tr->cpu);
+
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -634,6 +709,21 @@ static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
 
 /**
+ * ftrace_off_permanent - disable all ftrace code permanently
+ *
+ * This should only be called when a serious anomaly has
+ * been detected.  This will turn off function tracing,
+ * ring buffers, and other tracing utilities. It takes no
+ * locks and can be called from any context.
+ */
+void ftrace_off_permanent(void)
+{
+       tracing_disabled = 1;
+       ftrace_stop();
+       tracing_off_permanent();
+}
+
+/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -737,7 +827,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
        spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
        char *cmdline = "<...>";
        unsigned map;
@@ -774,6 +864,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
+       entry->tgid                     = (tsk) ? tsk->tgid : 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -810,6 +901,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
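+/*
+ * Write a function-graph entry event (TRACE_GRAPH_ENT) into the global
+ * ring buffer, unless tracing is disabled on this CPU.
+ */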
+static void __trace_graph_entry(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               struct ftrace_graph_ent *trace,
+                               unsigned long flags,
+                               int pc)
+{
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ent_entry *entry;
+       unsigned long irq_flags;
+
+       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+               return;
+
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type                 = TRACE_GRAPH_ENT;
+       entry->graph_ent                        = *trace;
+       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               struct ftrace_graph_ret *trace,
+                               unsigned long flags,
+                               int pc)
+{
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ret_entry *entry;
+       unsigned long irq_flags;
+
+       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+               return;
+
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type                 = TRACE_GRAPH_RET;
+       entry->ret                              = *trace;
+       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+#endif
+
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -861,6 +1002,46 @@ void __trace_stack(struct trace_array *tr,
        ftrace_trace_stack(tr, data, flags, skip, preempt_count());
 }
 
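+/*
+ * Record the current user-space stack trace into the ring buffer.
+ * Only active when the "userstacktrace" trace option is set and the
+ * kernel is built with CONFIG_STACKTRACE.
+ */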
+static void ftrace_trace_userstack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags, int pc)
+{
+#ifdef CONFIG_STACKTRACE
+       struct ring_buffer_event *event;
+       struct userstack_entry *entry;
+       struct stack_trace trace;
+       unsigned long irq_flags;
+
+       if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+               return;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type         = TRACE_USER_STACK;
+
+       memset(&entry->caller, 0, sizeof(entry->caller));
+
+       trace.nr_entries        = 0;
+       trace.max_entries       = FTRACE_STACK_ENTRIES;
+       trace.skip              = 0;
+       trace.entries           = entry->caller;
+
+       save_stack_trace_user(&trace);
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
+}
+
+void __trace_userstack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags)
+{
+       ftrace_trace_userstack(tr, data, flags, preempt_count());
+}
+
 static void
 ftrace_trace_special(void *__tr, void *__data,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -884,6 +1065,7 @@ ftrace_trace_special(void *__tr, void *__data,
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, irq_flags, 4, pc);
+       ftrace_trace_userstack(tr, data, irq_flags, pc);
 
        trace_wake_up();
 }
@@ -922,6 +1104,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_cpu = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 5, pc);
+       ftrace_trace_userstack(tr, data, flags, pc);
 }
 
 void
@@ -951,6 +1134,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 6, pc);
+       ftrace_trace_userstack(tr, data, flags, pc);
 
        trace_wake_up();
 }
@@ -1024,7 +1208,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
-       raw_local_irq_save(flags);
+       local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
@@ -1035,9 +1219,66 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        }
 
        atomic_dec(&data->disabled);
-       raw_local_irq_restore(flags);
+       local_irq_restore(flags);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
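+/*
+ * Entry hook for the function-graph tracer: records the call unless the
+ * current task or function is filtered out.  Returns 0 when this
+ * function should not be traced, 1 otherwise.
+ */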
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (!ftrace_trace_task(current))
+               return 0;
+
+       if (!ftrace_graph_addr(trace->func))
+               return 0;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_entry(tr, data, trace, flags, pc);
+       }
+       /* Only do the atomic if it is not already set */
+       if (!test_tsk_trace_graph(current))
+               set_tsk_trace_graph(current);
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+
+       return 1;
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_return(tr, data, trace, flags, pc);
+       }
+       if (!trace->depth)
+               clear_tsk_trace_graph(current);
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = function_trace_call,
@@ -1065,9 +1306,10 @@ void tracing_stop_function_trace(void)
 
 enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
+       TRACE_FILE_ANNOTATE     = 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
@@ -1146,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
        if (iter->ent)
-               trace_iterator_increment(iter, iter->cpu);
+               trace_iterator_increment(iter);
 
        return iter->ent ? iter : NULL;
 }
@@ -1285,7 +1527,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-static int
+int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
        int ret;
@@ -1306,6 +1548,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
        return ret;
 }
 
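+/*
+ * Print a user-space address.  When the owning mm is available, the
+ * address is resolved to the backing file plus an offset; otherwise
+ * (or when sym-addr is set) the raw address is printed.
+ */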
+static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+                                   unsigned long ip, unsigned long sym_flags)
+{
+       struct file *file = NULL;
+       unsigned long vmstart = 0;
+       int ret = 1;
+
+       if (mm) {
+               const struct vm_area_struct *vma;
+
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, ip);
+               if (vma) {
+                       file = vma->vm_file;
+                       vmstart = vma->vm_start;
+               }
+               if (file) {
+                       ret = trace_seq_path(s, &file->f_path);
+                       if (ret)
+                               ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
+               }
+               up_read(&mm->mmap_sem);
+       }
+       if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
+               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+       return ret;
+}
+
+static int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+                     unsigned long sym_flags)
+{
+       struct mm_struct *mm = NULL;
+       int ret = 1;
+       unsigned int i;
+
+       if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+               struct task_struct *task;
+               /*
+                * we do the lookup on the thread group leader,
+                * since individual threads might have already quit!
+                */
+               rcu_read_lock();
+               task = find_task_by_vpid(entry->ent.tgid);
+               if (task)
+                       mm = get_task_mm(task);
+               rcu_read_unlock();
+       }
+
+       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+               unsigned long ip = entry->caller[i];
+
+               if (ip == ULONG_MAX || !ret)
+                       break;
+               if (i && ret)
+                       ret = trace_seq_puts(s, " <- ");
+               if (!ip) {
+                       if (ret)
+                               ret = trace_seq_puts(s, "??");
+                       continue;
+               }
+               if (!ret)
+                       break;
+               if (ret)
+                       ret = seq_print_user_ip(s, mm, ip, sym_flags);
+       }
+
+       if (mm)
+               mmput(mm);
+       return ret;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
        seq_puts(m, "#                  _------=> CPU#            \n");
@@ -1443,6 +1757,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
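+/* Map a task state bitmask to its one-letter code, or '?' if unknown. */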
+static int task_state_char(unsigned long state)
+{
+       int bit = state ? __ffs(state) + 1 : 0;
+
+       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
 /*
  * The message is supposed to contain an ending newline.
  * If the printing stops prematurely, try to add a newline of our own.
@@ -1484,10 +1805,16 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
 
-       if (cpu_isset(iter->cpu, iter->started))
+       if (!(trace_flags & TRACE_ITER_ANNOTATE))
+               return;
+
+       if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                return;
 
-       cpu_set(iter->cpu, iter->started);
+       if (cpumask_test_cpu(iter->cpu, iter->started))
+               return;
+
+       cpumask_set_cpu(iter->cpu, iter->started);
        trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
@@ -1505,7 +1832,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        char *comm;
        int S, T;
        int i;
-       unsigned state;
 
        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;
@@ -1551,12 +1877,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 
                trace_assign_type(field, entry);
 
-               T = field->next_state < sizeof(state_to_char) ?
-                       state_to_char[field->next_state] : 'X';
-
-               state = field->prev_state ?
-                       __ffs(field->prev_state) + 1 : 0;
-               S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
+               T = task_state_char(field->next_state);
+               S = task_state_char(field->prev_state);
                comm = trace_find_cmdline(field->next_pid);
                trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                                 field->prev_pid,
@@ -1603,6 +1925,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                        trace_seq_print_cont(s, iter);
                break;
        }
+       case TRACE_BRANCH: {
+               struct trace_branch *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "  ok  " : " MISS ",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
+       case TRACE_USER_STACK: {
+               struct userstack_entry *field;
+
+               trace_assign_type(field, entry);
+
+               seq_print_userip_objs(field, s, sym_flags);
+               trace_seq_putc(s, '\n');
+               break;
+       }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
@@ -1676,10 +2019,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 
                trace_assign_type(field, entry);
 
-               S = field->prev_state < sizeof(state_to_char) ?
-                       state_to_char[field->prev_state] : 'X';
-               T = field->next_state < sizeof(state_to_char) ?
-                       state_to_char[field->next_state] : 'X';
+               T = task_state_char(field->next_state);
+               S = task_state_char(field->prev_state);
                ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
                                       field->prev_pid,
                                       field->prev_prio,
@@ -1738,6 +2079,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                        trace_seq_print_cont(s, iter);
                break;
        }
+       case TRACE_GRAPH_RET: {
+               return print_graph_function(iter);
+       }
+       case TRACE_GRAPH_ENT: {
+               return print_graph_function(iter);
+       }
+       case TRACE_BRANCH: {
+               struct trace_branch *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "  ok  " : " MISS ",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
+       case TRACE_USER_STACK: {
+               struct userstack_entry *field;
+
+               trace_assign_type(field, entry);
+
+               ret = seq_print_userip_objs(field, s, sym_flags);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               ret = trace_seq_putc(s, '\n');
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               break;
+       }
        }
        return TRACE_TYPE_HANDLED;
 }
@@ -1778,12 +2150,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
                trace_assign_type(field, entry);
 
-               S = field->prev_state < sizeof(state_to_char) ?
-                       state_to_char[field->prev_state] : 'X';
-               T = field->next_state < sizeof(state_to_char) ?
-                       state_to_char[field->next_state] : 'X';
-               if (entry->type == TRACE_WAKE)
-                       S = '+';
+               T = task_state_char(field->next_state);
+               S = entry->type == TRACE_WAKE ? '+' :
+                       task_state_char(field->prev_state);
                ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
                                       field->prev_pid,
                                       field->prev_prio,
@@ -1797,6 +2166,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1869,12 +2239,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
                trace_assign_type(field, entry);
 
-               S = field->prev_state < sizeof(state_to_char) ?
-                       state_to_char[field->prev_state] : 'X';
-               T = field->next_state < sizeof(state_to_char) ?
-                       state_to_char[field->next_state] : 'X';
-               if (entry->type == TRACE_WAKE)
-                       S = '+';
+               T = task_state_char(field->next_state);
+               S = entry->type == TRACE_WAKE ? '+' :
+                       task_state_char(field->prev_state);
                SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
                SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
                SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -1885,6 +2252,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1901,6 +2269,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
        return TRACE_TYPE_HANDLED;
 }
 
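+/*
+ * Output only the message text of a TRACE_PRINT entry, used when the
+ * printk-msg-only trace option is set.
+ */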
+static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       struct print_entry *field;
+       int ret;
+
+       trace_assign_type(field, entry);
+
+       ret = trace_seq_printf(s, "%s", field->buf);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (entry->flags & TRACE_FLAG_CONT)
+               trace_seq_print_cont(s, iter);
+
+       return TRACE_TYPE_HANDLED;
+}
+
 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
@@ -1939,6 +2326,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1980,6 +2368,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
                        return ret;
        }
 
+       if (iter->ent->type == TRACE_PRINT &&
+                       trace_flags & TRACE_ITER_PRINTK &&
+                       trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+               return print_printk_msg_only(iter);
+
        if (trace_flags & TRACE_ITER_BIN)
                return print_bin_fmt(iter);
 
@@ -2004,7 +2397,9 @@ static int s_show(struct seq_file *m, void *v)
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
-               if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+               if (iter->trace && iter->trace->print_header)
+                       iter->trace->print_header(m);
+               else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
@@ -2056,6 +2451,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
        iter->trace = current_trace;
        iter->pos = -1;
 
+       /* Notify the tracer early, before we stop tracing. */
+       if (iter->trace && iter->trace->open)
+               iter->trace->open(iter);
+
+       /* Annotate start of buffers if we had overruns */
+       if (ring_buffer_overruns(iter->tr->buffer))
+               iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
        for_each_tracing_cpu(cpu) {
 
                iter->buffer_iter[cpu] =
@@ -2076,9 +2480,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
        /* stop the trace while dumping */
        tracing_stop();
 
-       if (iter->trace && iter->trace->open)
-                       iter->trace->open(iter);
-
        mutex_unlock(&trace_types_lock);
 
  out:
@@ -2090,6 +2491,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
        mutex_unlock(&trace_types_lock);
+       kfree(iter);
 
        return ERR_PTR(-ENOMEM);
 }
@@ -2244,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2291,39 +2687,45 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                      size_t count, loff_t *ppos)
 {
        int err, cpu;
+       cpumask_var_t tracing_cpumask_new;
+
+       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+               return -ENOMEM;
 
        mutex_lock(&tracing_cpumask_update_lock);
        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
        if (err)
                goto err_unlock;
 
-       raw_local_irq_disable();
+       local_irq_disable();
        __raw_spin_lock(&ftrace_max_lock);
        for_each_tracing_cpu(cpu) {
                /*
                 * Increase/decrease the disabled counter if we are
                 * about to flip a bit in the cpumask:
                 */
-               if (cpu_isset(cpu, tracing_cpumask) &&
-                               !cpu_isset(cpu, tracing_cpumask_new)) {
+               if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
                }
-               if (!cpu_isset(cpu, tracing_cpumask) &&
-                               cpu_isset(cpu, tracing_cpumask_new)) {
+               if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
                }
        }
        __raw_spin_unlock(&ftrace_max_lock);
-       raw_local_irq_enable();
+       local_irq_enable();
 
-       tracing_cpumask = tracing_cpumask_new;
+       cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return count;
 
 err_unlock:
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return err;
 }
@@ -2335,13 +2737,16 @@ static struct file_operations tracing_cpumask_fops = {
 };
 
 static ssize_t
-tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
+tracing_trace_options_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
+       int i;
        char *buf;
        int r = 0;
        int len = 0;
-       int i;
+       u32 tracer_flags = current_trace->flags->val;
+       struct tracer_opt *trace_opts = current_trace->flags->opts;
+
 
        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
@@ -2349,6 +2754,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                len += 3; /* "no" and space */
        }
 
+       /*
+        * Increase the size with the names of the options
+        * specific to the current tracer.
+        */
+       for (i = 0; trace_opts[i].name; i++) {
+               len += strlen(trace_opts[i].name);
+               len += 3; /* "no" and space */
+       }
+
        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
@@ -2361,6 +2775,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }
 
+       for (i = 0; trace_opts[i].name; i++) {
+               if (tracer_flags & trace_opts[i].bit)
+                       r += sprintf(buf + r, "%s ",
+                               trace_opts[i].name);
+               else
+                       r += sprintf(buf + r, "no%s ",
+                               trace_opts[i].name);
+       }
+
        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);
 
@@ -2371,13 +2794,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
        return r;
 }
 
+/* Try to assign a tracer specific option */
+static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+{
+       struct tracer_flags *trace_flags = trace->flags;
+       struct tracer_opt *opts = NULL;
+       int ret = 0, i = 0;
+       int len;
+
+       for (i = 0; trace_flags->opts[i].name; i++) {
+               opts = &trace_flags->opts[i];
+               len = strlen(opts->name);
+
+               if (strncmp(cmp, opts->name, len) == 0) {
+                       ret = trace->set_flag(trace_flags->val,
+                               opts->bit, !neg);
+                       break;
+               }
+       }
+       /* Not found */
+       if (!trace_flags->opts[i].name)
+               return -EINVAL;
+
+       /* Refused to handle */
+       if (ret)
+               return ret;
+
+       if (neg)
+               trace_flags->val &= ~opts->bit;
+       else
+               trace_flags->val |= opts->bit;
+
+       return 0;
+}
+
 static ssize_t
-tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
 {
        char buf[64];
        char *cmp = buf;
        int neg = 0;
+       int ret;
        int i;
 
        if (cnt >= sizeof(buf))
@@ -2404,11 +2862,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        break;
                }
        }
-       /*
-        * If no option could be set, return an error:
-        */
-       if (!trace_options[i])
-               return -EINVAL;
+
+       /* If no option could be set, test the specific tracer options */
+       if (!trace_options[i]) {
+               ret = set_tracer_option(current_trace, cmp, neg);
+               if (ret)
+                       return ret;
+       }
 
        filp->f_pos += cnt;
 
@@ -2417,8 +2877,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 
 static struct file_operations tracing_iter_fops = {
        .open           = tracing_open_generic,
-       .read           = tracing_iter_ctrl_read,
-       .write          = tracing_iter_ctrl_write,
+       .read           = tracing_trace_options_read,
+       .write          = tracing_trace_options_write,
 };
 
 static const char readme_msg[] =
@@ -2432,9 +2892,9 @@ static const char readme_msg[] =
        "# echo sched_switch > /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/current_tracer\n"
        "sched_switch\n"
-       "# cat /debug/tracing/iter_ctrl\n"
+       "# cat /debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
-       "# echo print-parent > /debug/tracing/iter_ctrl\n"
+       "# echo print-parent > /debug/tracing/trace_options\n"
        "# echo 1 > /debug/tracing/tracing_enabled\n"
        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
        "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2543,13 +3003,18 @@ static int tracing_set_tracer(char *buf)
        if (t == current_trace)
                goto out;
 
+       trace_branch_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
 
        current_trace = t;
-       if (t->init)
-               t->init(tr);
+       if (t->init) {
+               ret = t->init(tr);
+               if (ret)
+                       goto out;
+       }
 
+       trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
 
@@ -2563,6 +3028,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
        char buf[max_tracer_type_len+1];
        int i;
        size_t ret;
+       int err;
+
+       ret = cnt;
 
        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;
@@ -2576,12 +3044,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;
 
-       ret = tracing_set_tracer(buf);
-       if (!ret)
-               ret = cnt;
+       err = tracing_set_tracer(buf);
+       if (err)
+               return err;
 
-       if (ret > 0)
-               filp->f_pos += ret;
+       filp->f_pos += ret;
 
        return ret;
 }
@@ -2647,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
        if (!iter)
                return -ENOMEM;
 
+       if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+               kfree(iter);
+               return -ENOMEM;
+       }
+
        mutex_lock(&trace_types_lock);
 
        /* trace pipe does not show start of buffer */
-       cpus_setall(iter->started);
+       cpumask_setall(iter->started);
 
        iter->tr = &global_trace;
        iter->trace = current_trace;
@@ -2667,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
        struct trace_iterator *iter = file->private_data;
 
+       free_cpumask_var(iter->started);
        kfree(iter);
        atomic_dec(&tracing_reader);
 
@@ -2827,7 +3300,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
        char buf[64];
        int r;
 
-       r = sprintf(buf, "%lu\n", tr->entries);
+       r = sprintf(buf, "%lu\n", tr->entries >> 10);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2867,6 +3340,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
                        atomic_inc(&max_tr.data[cpu]->disabled);
        }
 
+       /* value is in KB */
+       val <<= 10;
+
        if (val != global_trace.entries) {
                ret = ring_buffer_resize(global_trace.buffer, val);
                if (ret < 0) {
@@ -2917,7 +3393,7 @@ static int mark_printk(const char *fmt, ...)
        int ret;
        va_list args;
        va_start(args, fmt);
-       ret = trace_vprintk(0, fmt, args);
+       ret = trace_vprintk(0, -1, fmt, args);
        va_end(args);
        return ret;
 }
@@ -3067,10 +3543,10 @@ static __init int tracer_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
 
-       entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
+       entry = debugfs_create_file("trace_options", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
-               pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+               pr_warning("Could not create debugfs 'trace_options' entry\n");
 
        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
                                    NULL, &tracing_cpumask_fops);
@@ -3120,11 +3596,11 @@ static __init int tracer_init_debugfs(void)
                pr_warning("Could not create debugfs "
                           "'trace_pipe' entry\n");
 
-       entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+       entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
                                    &global_trace, &tracing_entries_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
-                          "'trace_entries' entry\n");
+                          "'buffer_size_kb' entry\n");
 
        entry = debugfs_create_file("trace_marker", 0220, d_tracer,
                                    NULL, &tracing_mark_fops);
@@ -3146,7 +3622,7 @@ static __init int tracer_init_debugfs(void)
        return 0;
 }
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
        static DEFINE_SPINLOCK(trace_buf_lock);
        static char trace_buf[TRACE_BUF_SIZE];
@@ -3154,11 +3630,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        struct ring_buffer_event *event;
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
-       struct print_entry *entry;
-       unsigned long flags, irq_flags;
        int cpu, len = 0, size, pc;
+       struct print_entry *entry;
+       unsigned long irq_flags;
 
-       if (tracing_disabled)
+       if (tracing_disabled || tracing_selftest_running)
                return 0;
 
        pc = preempt_count();
@@ -3169,7 +3645,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        if (unlikely(atomic_read(&data->disabled)))
                goto out;
 
-       spin_lock_irqsave(&trace_buf_lock, flags);
+       pause_graph_tracing();
+       spin_lock_irqsave(&trace_buf_lock, irq_flags);
        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
        len = min(len, TRACE_BUF_SIZE-1);
@@ -3180,17 +3657,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags, pc);
+       tracing_generic_entry_update(&entry->ent, irq_flags, pc);
        entry->ent.type                 = TRACE_PRINT;
        entry->ip                       = ip;
+       entry->depth                    = depth;
 
        memcpy(&entry->buf, trace_buf, len);
        entry->buf[len] = 0;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-       spin_unlock_irqrestore(&trace_buf_lock, flags);
-
+       spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+       unpause_graph_tracing();
  out:
        preempt_enable_notrace();
 
@@ -3207,7 +3685,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
                return 0;
 
        va_start(ap, fmt);
-       ret = trace_vprintk(ip, fmt, ap);
+       ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
        va_end(ap);
        return ret;
 }
@@ -3258,7 +3736,7 @@ static struct notifier_block trace_die_notifier = {
  * it if we decide to change what log level the ftrace dump
  * should be at.
  */
-#define KERN_TRACE             KERN_INFO
+#define KERN_TRACE             KERN_EMERG
 
 static void
 trace_printk_seq(struct trace_seq *s)
@@ -3280,7 +3758,6 @@ void ftrace_dump(void)
        static DEFINE_SPINLOCK(ftrace_dump_lock);
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
-       static cpumask_t mask;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
@@ -3293,12 +3770,16 @@ void ftrace_dump(void)
        dump_ran = 1;
 
        /* No turning back! */
+       tracing_off();
        ftrace_kill();
 
        for_each_tracing_cpu(cpu) {
                atomic_inc(&global_trace.data[cpu]->disabled);
        }
 
+       /* don't look at user memory in panic mode */
+       trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
        iter.tr = &global_trace;
@@ -3311,8 +3792,6 @@ void ftrace_dump(void)
         * and then release the locks again.
         */
 
-       cpus_clear(mask);
-
        while (!trace_empty(&iter)) {
 
                if (!cnt)
@@ -3348,19 +3827,28 @@ __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
        int i;
+       int ret = -ENOMEM;
 
-       /* TODO: make the number of buffers hot pluggable with CPUS */
-       tracing_buffer_mask = cpu_possible_map;
+       if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+               goto out;
 
+       if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+               goto out_free_buffer_mask;
+
+       cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+       cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+       /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(trace_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
-               return 0;
+               goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(trace_buf_size,
                                             TRACE_BUFFER_FLAGS);
@@ -3368,7 +3856,7 @@ __init static int tracer_alloc_buffers(void)
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
-               return 0;
+               goto out_free_cpumask;
        }
        max_tr.entries = ring_buffer_size(max_tr.buffer);
        WARN_ON(max_tr.entries != global_trace.entries);
@@ -3398,8 +3886,14 @@ __init static int tracer_alloc_buffers(void)
                                       &trace_panic_notifier);
 
        register_die_notifier(&trace_die_notifier);
+       ret = 0;
 
-       return 0;
+out_free_cpumask:
+       free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+       free_cpumask_var(tracing_buffer_mask);
+out:
+       return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);