audit: preface audit printk with audit
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ababedb..44f916a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
 #include <linux/kdebug.h>
 #include <linux/string.h>
 #include <linux/rwsem.h>
+#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
        preempt_disable();
-       __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+       __this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-       __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+       __this_cpu_dec(ftrace_cpu_disabled);
        preempt_enable();
 }
 
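The per_cpu_var() macro, which used to mangle a per-CPU variable name into its
per_cpu__-prefixed symbol, was removed from the percpu API; the __this_cpu_*()
operations now take the variable by its plain name. A minimal sketch of the
new convention, using a hypothetical counter:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, example_count);	/* hypothetical variable */

    static void example_count_inc(void)
    {
            preempt_disable();		/* __this_cpu_inc() itself does not */
            __this_cpu_inc(example_count);	/* no per_cpu_var() wrapper */
            preempt_enable();
    }

As in ftrace_disable_cpu() above, the caller is responsible for pinning the
task to one CPU around the access.
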
@@ -840,10 +840,10 @@ out:
        mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
        ftrace_disable_cpu();
-       ring_buffer_reset_cpu(tr->buffer, cpu);
+       ring_buffer_reset_cpu(buffer, cpu);
        ftrace_enable_cpu();
 }
 
@@ -855,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
        /* Make sure all commits have finished */
        synchronize_sched();
-       __tracing_reset(tr, cpu);
+       __tracing_reset(buffer, cpu);
 
        ring_buffer_record_enable(buffer);
 }
@@ -873,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
        tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
-               __tracing_reset(tr, cpu);
+               __tracing_reset(buffer, cpu);
 
        ring_buffer_record_enable(buffer);
 }
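
Passing the ring buffer into __tracing_reset() instead of the trace_array lets
both callers resolve tr->buffer once and reuse it for the whole
record-disable / reset / record-enable sequence. Reconstructed from the hunks
above, tracing_reset() now reads as below (the buffer initializer sits outside
the visible context and is an assumption):

    void tracing_reset(struct trace_array *tr, int cpu)
    {
            struct ring_buffer *buffer = tr->buffer;	/* assumed */

            ring_buffer_record_disable(buffer);

            /* Make sure all commits have finished */
            synchronize_sched();
            __tracing_reset(buffer, cpu);

            ring_buffer_record_enable(buffer);
    }
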
@@ -950,6 +950,8 @@ void tracing_start(void)
                goto out;
        }
 
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
 
        buffer = global_trace.buffer;
        if (buffer)
@@ -959,6 +961,8 @@ void tracing_start(void)
        if (buffer)
                ring_buffer_record_enable(buffer);
 
+       arch_spin_unlock(&ftrace_max_lock);
+
        ftrace_start();
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -980,6 +984,9 @@ void tracing_stop(void)
        if (trace_stop_count++)
                goto out;
 
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
+
        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
@@ -988,6 +995,8 @@ void tracing_stop(void)
        if (buffer)
                ring_buffer_record_disable(buffer);
 
+       arch_spin_unlock(&ftrace_max_lock);
+
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
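
ftrace_max_lock is the arch_spinlock_t that the max-latency code holds while
swapping global_trace.buffer with max_tr.buffer. Taking it across the two
record-enable (or record-disable) calls keeps a swap from sliding in between
them, which could otherwise leave one buffer running and its partner stopped.
A sketch of the raw-spinlock pattern involved (the lock definition is
paraphrased from elsewhere in trace.c; the swap function is hypothetical):

    #include <linux/spinlock.h>

    static arch_spinlock_t ftrace_max_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    static void swap_max_buffers_example(void)	/* hypothetical */
    {
            arch_spin_lock(&ftrace_max_lock);
            /* swap global_trace.buffer <-> max_tr.buffer here */
            arch_spin_unlock(&ftrace_max_lock);
    }

Note that arch_spin_lock() disables neither interrupts nor preemption by
itself; both call sites above already run under
spin_lock_irqsave(&tracing_start_lock, flags).
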
@@ -1182,7 +1191,7 @@ trace_function(struct trace_array *tr,
        struct ftrace_entry *entry;
 
        /* If we are reading the ring buffer, don't trace */
-       if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+       if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
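
The same per_cpu_var() removal applies on the read side: this guard drops new
events on any CPU whose counter was raised by ftrace_disable_cpu(), i.e. while
that CPU's ring buffer is being read or reset. The pairing, sketched in the
same hypothetical style as above:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, ftrace_cpu_disabled);

    static void record_event_example(void)	/* hypothetical writer */
    {
            /* a reader or reset owns this CPU's buffer: drop the event */
            if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                    return;
            /* ... reserve and commit the ring-buffer event ... */
    }
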
@@ -1275,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
+       /*
+        * NMIs cannot handle page faults, even with fixups.
+        * Saving the user stack can (and often does) fault.
+        */
+       if (unlikely(in_nmi()))
+               return;
+
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
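
Saving a user stack trace walks user pages, and those accesses can fault; NMI
context cannot service a page fault, even via the exception fixup tables, so
the user-stack tracer now refuses to run from NMI. The guard in isolation
(hypothetical wrapper, same logic as the hunk above):

    #include <linux/hardirq.h>	/* in_nmi() */

    static void save_user_stack_example(void)	/* hypothetical */
    {
            /* user-page accesses below may fault; NMIs cannot take faults */
            if (unlikely(in_nmi()))
                    return;

            /* ... walk and copy the user stack frames ... */
    }
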
@@ -1719,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
                ftrace_enable_cpu();
 
+               iter->leftover = 0;
                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;
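
iter->leftover holds a trace line that overflowed the seq_file buffer so that
s_show() can flush it on the next read. When s_start() rewinds the iterator to
replay from the head of the buffer, a stale leftover from the previous read
cycle would otherwise be emitted first; clearing it keeps the rewound stream
consistent. A paraphrased sketch of the rewind branch with the fix applied
(assuming the surrounding trace.c definitions of s_next() and struct
trace_iterator):

    static void *s_start_rewind_sketch(struct seq_file *m,
                                       struct trace_iterator *iter, loff_t *pos)
    {
            void *p;
            loff_t l = 0;

            iter->ent = NULL;
            iter->cpu = 0;
            iter->idx = -1;

            ftrace_disable_cpu();
            /* ... rewind each CPU's ring-buffer iterator ... */
            ftrace_enable_cpu();

            iter->leftover = 0;	/* forget any half-flushed line */
            for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                    ;
            return p;
    }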