Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd8..84a3a7b 100644
@@ -20,6 +20,8 @@
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
 
+#include "trace.h"
+
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
  *
  */
 u64 notrace trace_clock_local(void)
 {
-       unsigned long flags;
        u64 clock;
+       int resched;
 
        /*
         * sched_clock() is an architecture implemented, fast, scalable,
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events.
         */
-       raw_local_irq_save(flags);
+       resched = ftrace_preempt_disable();
        clock = sched_clock();
-       raw_local_irq_restore(flags);
+       ftrace_preempt_enable(resched);
 
        return clock;
 }
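The #include "trace.h" added above is what provides ftrace_preempt_disable()/ftrace_preempt_enable(). For context (paraphrased, not part of this diff), the helpers in kernel/trace/trace.h at this point look roughly like the sketch below: preemption is toggled with the notrace variants so the preempt accounting itself is not traced, and the NEED_RESCHED state observed beforehand is handed back to the enable side so tracing code does not recurse into the scheduler.

/* Rough sketch of the kernel/trace/trace.h helpers; assumes
 * <linux/preempt.h> and <linux/sched.h> are already included. */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	/* notrace variant: do not trace the preempt_count update itself */
	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	/*
	 * If NEED_RESCHED was already set before preemption was disabled,
	 * re-enable without the resched check so we cannot call into
	 * schedule() from tracing context; otherwise the normal notrace
	 * enable is fine.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}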
@@ -66,10 +68,14 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+       u64 prev_time;
+       arch_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+       {
+               .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
+       };
 
 u64 notrace trace_clock_global(void)
 {
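The struct introduced above exists purely so that prev_time and the lock serializing it share one cache line: every trace_clock_global() call takes the lock and then reads and writes prev_time, so keeping them together bounces a single cache line between CPUs instead of two. For reference, the alignment annotation is roughly the following (condensed from <linux/cache.h>; the exact definition depends on architecture and config):

/* Rough shape of the annotation: no-op on !SMP, cache-line alignment on SMP. */
#ifdef CONFIG_SMP
# define ____cacheline_aligned_in_smp \
	__attribute__((__aligned__(SMP_CACHE_BYTES)))
#else
# define ____cacheline_aligned_in_smp
#endif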
@@ -88,19 +94,19 @@ u64 notrace trace_clock_global(void)
        if (unlikely(in_nmi()))
                goto out;
 
-       __raw_spin_lock(&trace_clock_lock);
+       arch_spin_lock(&trace_clock_struct.lock);
 
        /*
         * TODO: if this happens often then maybe we should reset
-        * my_scd->clock to prev_trace_clock_time+1, to make sure
+        * my_scd->clock to prev_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
-       if ((s64)(now - prev_trace_clock_time) < 0)
-               now = prev_trace_clock_time + 1;
+       if ((s64)(now - trace_clock_struct.prev_time) < 0)
+               now = trace_clock_struct.prev_time + 1;
 
-       prev_trace_clock_time = now;
+       trace_clock_struct.prev_time = now;
 
-       __raw_spin_unlock(&trace_clock_lock);
+       arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
        raw_local_irq_restore(flags);
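Two notes for context. The locking is skipped in NMI context (the goto out above) because an NMI can fire while the same CPU already holds trace_clock_struct.lock, and arch_spin_lock() would then deadlock; the NMI path simply returns the unserialized clock value. As for how these functions are reached: kernel/trace/trace.c exposes them as selectable ring-buffer clocks, roughly as sketched below (paraphrased, not part of this diff); the active clock is chosen by writing "local" or "global" to /sys/kernel/debug/tracing/trace_clock.

/* Rough sketch of the clock selection table in kernel/trace/trace.c. */
static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },	/* per-CPU, fastest, least coherent */
	{ trace_clock_global,	"global" },	/* serialized, globally monotonic */
};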