kernel/trace/trace_sched_wakeup.c
index 5948011..42ae1e7 100644
@@ -15,6 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -25,10 +26,69 @@ static struct task_struct   *wakeup_task;
 static int                     wakeup_cpu;
 static unsigned                        wakeup_prio = -1;
 
-static DEFINE_SPINLOCK(wakeup_lock);
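+/*
+ * wakeup_lock is taken from within the function tracer itself, so use an
+ * arch-level raw spinlock: it bypasses lockdep and the spin-debug code and
+ * therefore cannot recurse back into the tracer.
+ */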
+static raw_spinlock_t wakeup_lock =
+       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
+#ifdef CONFIG_FUNCTION_TRACER
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = wakeup_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int resched;
+       int cpu;
+       int pc;
+
+       if (likely(!wakeup_task))
+               return;
+
+       pc = preempt_count();
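+       /*
+        * Disable preemption, but remember whether NEED_RESCHED was already
+        * set: the matching ftrace_preempt_enable() then avoids calling into
+        * schedule() from inside the tracer.
+        */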
+       resched = ftrace_preempt_disable();
+
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
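+       /* per-cpu recursion guard: bail if the tracer is already active here */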
+       disabled = atomic_inc_return(&data->disabled);
+       if (unlikely(disabled != 1))
+               goto out;
+
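+       /* __raw_spin_lock() does not disable interrupts, so do that by hand */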
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);
+
+       if (unlikely(!wakeup_task))
+               goto unlock;
+
+       /*
+        * The task can't disappear because it needs to
+        * wake up first, and we have the wakeup_lock.
+        */
+       if (task_cpu(wakeup_task) != cpu)
+               goto unlock;
+
+       trace_function(tr, data, ip, parent_ip, flags, pc);
+
+ unlock:
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);
+
+ out:
+       atomic_dec(&data->disabled);
+
+       ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+       .func = wakeup_tracer_call,
+};
+#endif /* CONFIG_FUNCTION_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -44,16 +104,19 @@ static int report_latency(cycle_t delta)
        return 1;
 }
 
-void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
+static void notrace
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
+       struct task_struct *next)
 {
        unsigned long latency = 0, t0 = 0, t1 = 0;
-       struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        cycle_t T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
+       int pc;
+
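+       /* save prev's comm so its pid resolves to a name in the output */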
+       tracing_record_cmdline(prev);
 
        if (unlikely(!tracer_enabled))
                return;
@@ -70,22 +133,25 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
        if (next != wakeup_task)
                return;
 
-       /* The task we are waitng for is waking up */
-       data = tr->data[wakeup_cpu];
+       pc = preempt_count();
+
+       /* The task we are waiting for is waking up */
+       data = wakeup_trace->data[wakeup_cpu];
 
        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
-       disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+       disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (likely(disabled != 1))
                goto out;
 
-       spin_lock_irqsave(&wakeup_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);
 
        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;
 
-       trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+       trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
        /*
         * usecs conversion is slow so we try to delay the conversion
@@ -104,13 +170,14 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);
 
-       update_max_tr(tr, wakeup_task, wakeup_cpu);
+       update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock:
-       __wakeup_reset(tr);
-       spin_unlock_irqrestore(&wakeup_lock, flags);
+       __wakeup_reset(wakeup_trace);
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);
 out:
-       atomic_dec(&tr->data[cpu]->disabled);
+       atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -118,11 +185,9 @@ static void __wakeup_reset(struct trace_array *tr)
        struct trace_array_cpu *data;
        int cpu;
 
-       assert_spin_locked(&wakeup_lock);
-
        for_each_possible_cpu(cpu) {
                data = tr->data[cpu];
-               tracing_reset(data);
+               tracing_reset(tr, cpu);
        }
 
        wakeup_cpu = -1;
@@ -138,37 +203,46 @@ static void wakeup_reset(struct trace_array *tr)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&wakeup_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
-       spin_unlock_irqrestore(&wakeup_lock, flags);
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);
 }
 
 static void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
-                  struct task_struct *curr)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
+       int pc;
+
+       if (likely(!tracer_enabled))
+               return;
+
+       tracing_record_cmdline(p);
+       tracing_record_cmdline(current);
 
        if (likely(!rt_task(p)) ||
                        p->prio >= wakeup_prio ||
-                       p->prio >= curr->prio)
+                       p->prio >= current->prio)
                return;
 
-       disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+       pc = preempt_count();
+       disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (unlikely(disabled != 1))
                goto out;
 
        /* interrupts should be off from try_to_wake_up */
-       spin_lock(&wakeup_lock);
+       __raw_spin_lock(&wakeup_lock);
 
        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;
 
        /* reset the trace */
-       __wakeup_reset(tr);
+       __wakeup_reset(wakeup_trace);
 
        wakeup_cpu = task_cpu(p);
        wakeup_prio = p->prio;
@@ -178,29 +252,47 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 
        local_save_flags(flags);
 
-       tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-       trace_function(tr, tr->data[wakeup_cpu],
-                      CALLER_ADDR1, CALLER_ADDR2, flags);
+       wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
+       trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
+                      CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
-       spin_unlock(&wakeup_lock);
+       __raw_spin_unlock(&wakeup_lock);
 out:
-       atomic_dec(&tr->data[cpu]->disabled);
+       atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
-void wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
+/*
+ * save_tracer_enabled is used to save the state of the tracer_enabled
+ * variable when we disable it while opening a trace output file.
+ */
+static int save_tracer_enabled;
+
+static void start_wakeup_tracer(struct trace_array *tr)
 {
-       if (likely(!tracer_enabled))
+       int ret;
+
+       ret = register_trace_sched_wakeup(probe_wakeup);
+       if (ret) {
+               pr_info("wakeup trace: Couldn't activate tracepoint"
+                       " probe to kernel_sched_wakeup\n");
                return;
+       }
 
-       tracing_record_cmdline(curr);
-       tracing_record_cmdline(wakee);
+       ret = register_trace_sched_wakeup_new(probe_wakeup);
+       if (ret) {
+               pr_info("wakeup trace: Couldn't activate tracepoint"
+                       " probe to kernel_sched_wakeup_new\n");
+               goto fail_deprobe;
+       }
 
-       wakeup_check_start(wakeup_trace, wakee, curr);
-}
+       ret = register_trace_sched_switch(probe_wakeup_sched_switch);
+       if (ret) {
+               pr_info("sched trace: Couldn't activate tracepoint"
+                       " probe to kernel_sched_schedule\n");
+               goto fail_deprobe_wake_new;
+       }
 
-static void start_wakeup_tracer(struct trace_array *tr)
-{
        wakeup_reset(tr);
 
        /*
@@ -212,53 +304,74 @@ static void start_wakeup_tracer(struct trace_array *tr)
         */
        smp_wmb();
 
-       tracer_enabled = 1;
+       register_ftrace_function(&trace_ops);
+
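+       /*
+        * Start enabled only if the global tracing switch is on, and
+        * remember that state so wakeup_tracer_close() can restore it.
+        */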
+       if (tracing_is_enabled()) {
+               tracer_enabled = 1;
+               save_tracer_enabled = 1;
+       } else {
+               tracer_enabled = 0;
+               save_tracer_enabled = 0;
+       }
 
        return;
+fail_deprobe_wake_new:
+       unregister_trace_sched_wakeup_new(probe_wakeup);
+fail_deprobe:
+       unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
+       save_tracer_enabled = 0;
+       unregister_ftrace_function(&trace_ops);
+       unregister_trace_sched_switch(probe_wakeup_sched_switch);
+       unregister_trace_sched_wakeup_new(probe_wakeup);
+       unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static void wakeup_tracer_init(struct trace_array *tr)
+static int wakeup_tracer_init(struct trace_array *tr)
 {
+       tracing_max_latency = 0;
        wakeup_trace = tr;
-
-       if (tr->ctrl)
-               start_wakeup_tracer(tr);
+       start_wakeup_tracer(tr);
+       return 0;
 }
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
-       if (tr->ctrl) {
-               stop_wakeup_tracer(tr);
-               /* make sure we put back any tasks we are tracing */
-               wakeup_reset(tr);
-       }
+       stop_wakeup_tracer(tr);
+       /* make sure we put back any tasks we are tracing */
+       wakeup_reset(tr);
 }
 
-static void wakeup_tracer_ctrl_update(struct trace_array *tr)
+static void wakeup_tracer_start(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               start_wakeup_tracer(tr);
-       else
-               stop_wakeup_tracer(tr);
+       wakeup_reset(tr);
+       tracer_enabled = 1;
+       save_tracer_enabled = 1;
+}
+
+static void wakeup_tracer_stop(struct trace_array *tr)
+{
+       tracer_enabled = 0;
+       save_tracer_enabled = 0;
 }
 
 static void wakeup_tracer_open(struct trace_iterator *iter)
 {
        /* stop the trace while dumping */
-       if (iter->tr->ctrl)
-               stop_wakeup_tracer(iter->tr);
+       tracer_enabled = 0;
 }
 
 static void wakeup_tracer_close(struct trace_iterator *iter)
 {
        /* forget about any processes we were recording */
-       if (iter->tr->ctrl)
-               start_wakeup_tracer(iter->tr);
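+       /* restart only if the tracer was enabled when the file was opened */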
+       if (save_tracer_enabled) {
+               wakeup_reset(iter->tr);
+               tracer_enabled = 1;
+       }
 }
 
 static struct tracer wakeup_tracer __read_mostly =
@@ -266,9 +379,10 @@ static struct tracer wakeup_tracer __read_mostly =
        .name           = "wakeup",
        .init           = wakeup_tracer_init,
        .reset          = wakeup_tracer_reset,
+       .start          = wakeup_tracer_start,
+       .stop           = wakeup_tracer_stop,
        .open           = wakeup_tracer_open,
        .close          = wakeup_tracer_close,
-       .ctrl_update    = wakeup_tracer_ctrl_update,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,