Merge branches 'tracing/ftrace', 'tracing/kprobes', 'tracing/tasks' and 'linus' into...
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce6208..c95b729 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -155,13 +155,6 @@ ns2usecs(cycle_t nsec)
        return nsec;
 }
 
-cycle_t ftrace_now(int cpu)
-{
-       u64 ts = ring_buffer_time_stamp(cpu);
-       ring_buffer_normalize_time_stamp(cpu, &ts);
-       return ts;
-}
-
 /*
  * The global_trace is the descriptor that holds the tracing
  * buffers for the live tracing. For each CPU, it contains
@@ -178,6 +171,20 @@ static struct trace_array  global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+cycle_t ftrace_now(int cpu)
+{
+       u64 ts;
+
+       /* Early boot up does not have a buffer yet */
+       if (!global_trace.buffer)
+               return trace_clock_local();
+
+       ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+       ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+       return ts;
+}
+
 /*
  * The max_tr is used to snapshot the global_trace when a maximum
  * latency is reached. Some tracers will use this to store a maximum
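
Note: moving ftrace_now() below the global_trace definition is not cosmetic. The ring-buffer timestamp API now takes the buffer as its first argument, so the function must be able to name global_trace.buffer, and it needs a fallback for callers that run before the ring buffer is allocated. A minimal user-space sketch of that early-boot fallback (illustrative only; just the control flow mirrors the code above):

        #include <stdint.h>
        #include <time.h>

        struct ring_buffer;                     /* opaque, as in the kernel */
        static struct ring_buffer *buffer;      /* still NULL during "early boot" */

        static uint64_t clock_local(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
        }

        static uint64_t now(void)
        {
                if (!buffer)            /* no buffer clock to query yet */
                        return clock_local();
                /* otherwise ask the buffer for its timestamp and normalize
                 * it, as ftrace_now() does above; stubbed here */
                return clock_local();
        }
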
@@ -308,6 +315,7 @@ static const char *trace_options[] = {
        "printk-msg-only",
        "context-info",
        "latency-format",
+       "global-clock",
        NULL
 };
 
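Note: "global-clock" is appended just before the NULL sentinel, so its index in trace_options[] is also its bit number in trace_flags; the write handler further down computes the mask as 1 << i. The matching enum lives in trace.h (not part of this diff); the bit values below are illustrative, only the ordering constraint is real:

        enum {
                /* ... earlier TRACE_ITER_* bits, one per option string ... */
                TRACE_ITER_LATENCY_FMT  = 1 << 18,      /* "latency-format" */
                TRACE_ITER_GLOBAL_CLK   = 1 << 19,      /* "global-clock" */
        };
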
@@ -633,6 +641,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 }
 
 #define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
@@ -644,8 +653,8 @@ static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
-       memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
-       memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+       memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+       memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
 }
 
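Note: the switch from -1 to NO_CMDLINE_MAP (UINT_MAX) keeps these memset() calls correct only by a convenient property: memset() stores a single byte, and UINT_MAX truncates to 0xff, so 0xff repeated across every byte of an unsigned reassembles to UINT_MAX in every slot. A standalone check of that assumption (illustrative, not from the patch):

        #include <assert.h>
        #include <limits.h>
        #include <string.h>

        int main(void)
        {
                unsigned map[4];

                /* memset() truncates its int argument to one byte; four
                 * 0xff bytes read back as UINT_MAX, i.e. NO_CMDLINE_MAP,
                 * in every element of the array */
                memset(map, UINT_MAX, sizeof(map));
                assert(map[0] == UINT_MAX && map[3] == UINT_MAX);
                return 0;
        }
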
@@ -737,8 +746,7 @@ void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
 {
-       unsigned map;
-       unsigned idx;
+       unsigned pid, idx;
 
        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;
@@ -753,13 +761,20 @@ static void trace_save_cmdline(struct task_struct *tsk)
                return;
 
        idx = map_pid_to_cmdline[tsk->pid];
-       if (idx >= SAVED_CMDLINES) {
+       if (idx == NO_CMDLINE_MAP) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;
 
-               map = map_cmdline_to_pid[idx];
-               if (map <= PID_MAX_DEFAULT)
-                       map_pid_to_cmdline[map] = (unsigned)-1;
+               /*
+                * Check whether the cmdline buffer at idx has a pid
+                * mapped. We are going to overwrite that entry so we
+                * need to clear the map_pid_to_cmdline. Otherwise we
+                * would read the new comm for the old pid.
+                */
+               pid = map_cmdline_to_pid[idx];
+               if (pid != NO_CMDLINE_MAP)
+                       map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
+               map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;
 
                cmdline_idx = idx;
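
Note: this hunk fixes a real bug, not just a rename. The old code read map_cmdline_to_pid[] but never wrote it, so the reverse table kept its initial value and stale pid entries were never evicted; a recycled slot could then report the new command name for an old pid, exactly the case the new comment describes. The added store to map_cmdline_to_pid[idx] keeps the two tables as a consistent bidirectional map. An illustrative consistency check, using a user-space assert() for brevity (array names are from the diff, the helper itself is hypothetical):

        /* Must be called under trace_cmdline_lock. */
        static void check_cmdline_map(void)
        {
                unsigned idx;

                for (idx = 0; idx < SAVED_CMDLINES; idx++) {
                        unsigned pid = map_cmdline_to_pid[idx];

                        /* forward and reverse links must agree */
                        if (pid != NO_CMDLINE_MAP)
                                assert(map_pid_to_cmdline[pid] == idx);
                }
        }
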
@@ -786,18 +801,18 @@ void trace_find_cmdline(int pid, char comm[])
 
        __raw_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
-       if (map >= SAVED_CMDLINES)
-               goto out;
-
-       strcpy(comm, saved_cmdlines[map]);
+       if (map != NO_CMDLINE_MAP)
+               strcpy(comm, saved_cmdlines[map]);
+       else
+               strcpy(comm, "<...>");
 
- out:
        __raw_spin_unlock(&trace_cmdline_lock);
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-       if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+       if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+           !tracing_is_on())
                return;
 
        trace_save_cmdline(tsk);
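
Note: two behavioural changes land here. First, trace_find_cmdline() now always fills comm, writing "<...>" when no mapping exists; previously the goto path returned with the caller's buffer untouched. Second, tracing_record_cmdline() also bails out when no tracer is enabled, not only when the ring buffer is off. A hypothetical caller, showing why the guaranteed fill matters (trace_find_cmdline() and TASK_COMM_LEN are from the source, the surrounding lines are illustrative):

        char comm[TASK_COMM_LEN];

        /* comm is now always a valid NUL-terminated string, "<...>" in
         * the worst case, never leftover stack contents */
        trace_find_cmdline(entry->pid, comm);
        trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
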
@@ -2244,6 +2259,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
        return 0;
 }
 
+static void set_tracer_flags(unsigned int mask, int enabled)
+{
+       /* do nothing if flag is already set */
+       if (!!(trace_flags & mask) == !!enabled)
+               return;
+
+       if (enabled)
+               trace_flags |= mask;
+       else
+               trace_flags &= ~mask;
+
+       if (mask == TRACE_ITER_GLOBAL_CLK) {
+               u64 (*func)(void);
+
+               if (enabled)
+                       func = trace_clock_global;
+               else
+                       func = trace_clock_local;
+
+               mutex_lock(&trace_types_lock);
+               ring_buffer_set_clock(global_trace.buffer, func);
+
+               if (max_tr.buffer)
+                       ring_buffer_set_clock(max_tr.buffer, func);
+               mutex_unlock(&trace_types_lock);
+       }
+}
+
 static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
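
Note: set_tracer_flags() gives flag updates a single choke point so that flipping a bit can carry side effects. For TRACE_ITER_GLOBAL_CLK it swaps the ring buffer's timestamp source between trace_clock_local (cheap, per-CPU, may drift between CPUs) and trace_clock_global (costlier, but totally ordered across CPUs), on both the live buffer and the max-latency snapshot buffer; the early return makes a repeated write a no-op rather than a redundant clock swap. The same callback-swap pattern as a self-contained user-space sketch (everything below is illustrative):

        #include <pthread.h>
        #include <stdint.h>

        static uint64_t clock_local(void)  { return 0; }  /* cheap stub   */
        static uint64_t clock_global(void) { return 0; }  /* ordered stub */

        static uint64_t (*buffer_clock)(void) = clock_local;
        static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;

        static void set_global_clock(int enabled)
        {
                /* swap the timestamp callback under a lock, so the buffer
                 * sees one clock or the other, never a torn update */
                pthread_mutex_lock(&clock_lock);
                buffer_clock = enabled ? clock_global : clock_local;
                pthread_mutex_unlock(&clock_lock);
        }
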
@@ -2271,10 +2314,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                int len = strlen(trace_options[i]);
 
                if (strncmp(cmp, trace_options[i], len) == 0) {
-                       if (neg)
-                               trace_flags &= ~(1 << i);
-                       else
-                               trace_flags |= (1 << i);
+                       set_tracer_flags(1 << i, !neg);
                        break;
                }
        }
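
Note: with this hunk every toggle funnels through set_tracer_flags(), so the clock-swap side effect fires no matter which option bit is written. The neg flag is derived from a "no" prefix earlier in tracing_trace_options_write(), outside the hunks shown, presumably along these lines:

        /* presumed shape of the prefix check above this hunk */
        if (strncmp(cmp, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }

From user space, writing "global-clock" to the trace_options file therefore enables the global clock and "noglobal-clock" switches back to the local one.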