#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
+#include <trace/events/sched.h>
#include "trace.h"
static struct task_struct *wakeup_task;
static int wakeup_cpu;
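+/* CPU the wakeup task is currently running on; updated as it migrates */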
+static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
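+/* nonzero for the "wakeup_rt" tracer: consider only real-time tasks */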
+static int wakeup_rt;
-static DEFINE_SPINLOCK(wakeup_lock);
+static raw_spinlock_t wakeup_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-static void notrace __wakeup_reset(struct trace_array *tr);
+static void __wakeup_reset(struct trace_array *tr);
+
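+/* saved TRACE_ITER_LATENCY_FMT bit, restored when the tracer is reset */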
+static int save_lat_flag;
+
+#ifdef CONFIG_FUNCTION_TRACER
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int resched;
+ int cpu;
+ int pc;
+
+ if (likely(!wakeup_task))
+ return;
+
+ pc = preempt_count();
+ resched = ftrace_preempt_disable();
+
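+ /* only trace on the CPU the wakeup task is currently running on */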
+ cpu = raw_smp_processor_id();
+ if (cpu != wakeup_current_cpu)
+ goto out_enable;
+
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
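+ /* the disabled count serves as a per-CPU recursion guard */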
+ if (unlikely(disabled != 1))
+ goto out;
+
+ local_irq_save(flags);
+
+ trace_function(tr, ip, parent_ip, flags, pc);
+
+ local_irq_restore(flags);
+
+ out:
+ atomic_dec(&data->disabled);
+ out_enable:
+ ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = wakeup_tracer_call,
+};
+#endif /* CONFIG_FUNCTION_TRACER */
/*
* Should this new latency be reported/recorded?
*/
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
return 0;
} else {
if (delta <= tracing_max_latency)
return 0;
}
return 1;
}
-void notrace
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
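+/* keep wakeup_current_cpu in sync as the scheduler migrates the task */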
+static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
+{
+ if (task != wakeup_task)
+ return;
+
+ wakeup_current_cpu = cpu;
+}
+
+static void notrace
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
{
- unsigned long latency = 0, t0 = 0, t1 = 0;
- struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
cycle_t T0, T1, delta;
unsigned long flags;
long disabled;
int cpu;
+ int pc;
+
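+ /* record prev's comm so reports can resolve its pid to a name */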
+ tracing_record_cmdline(prev);
if (unlikely(!tracer_enabled))
return;
if (next != wakeup_task)
return;
- /* The task we are waitng for is waking up */
- data = tr->data[wakeup_cpu];
+ pc = preempt_count();
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+ disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
if (unlikely(disabled != 1))
goto out;
- spin_lock_irqsave(&wakeup_lock, flags);
+ local_irq_save(flags);
+ __raw_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
if (unlikely(!tracer_enabled || next != wakeup_task))
goto out_unlock;
- ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+ /* The task we are waiting for is waking up */
+ data = wakeup_trace->data[wakeup_cpu];
+
+ trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
- /*
- * usecs conversion is slow so we try to delay the conversion
- * as long as possible:
- */
T0 = data->preempt_timestamp;
- T1 = now(cpu);
+ T1 = ftrace_now(cpu);
delta = T1-T0;
if (!report_latency(delta))
goto out_unlock;
- latency = nsecs_to_usecs(delta);
-
- tracing_max_latency = delta;
- t0 = nsecs_to_usecs(T0);
- t1 = nsecs_to_usecs(T1);
-
- update_max_tr(tr, wakeup_task, wakeup_cpu);
-
- if (tracing_thresh) {
- printk(KERN_INFO "(%16s-%-5d|#%d): %lu us wakeup latency "
- "violates %lu us threshold.\n"
- " => started at timestamp %lu: ",
- wakeup_task->comm, wakeup_task->pid,
- raw_smp_processor_id(),
- latency, nsecs_to_usecs(tracing_thresh), t0);
- } else {
- printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us maximum "
- "wakeup latency.\n => started at timestamp %lu: ",
- wakeup_task->comm, wakeup_task->pid,
- cpu, latency, t0);
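+ /* snapshot the new max latency unless tracing has been stopped */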
+ if (likely(!is_tracing_stopped())) {
+ tracing_max_latency = delta;
+ update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
}
- printk(KERN_CONT " ended at timestamp %lu: ", t1);
- dump_stack();
- t1 = nsecs_to_usecs(now(cpu));
- printk(KERN_CONT " dump-end timestamp %lu\n\n", t1);
-
out_unlock:
- __wakeup_reset(tr);
- spin_unlock_irqrestore(&wakeup_lock, flags);
+ __wakeup_reset(wakeup_trace);
+ __raw_spin_unlock(&wakeup_lock);
+ local_irq_restore(flags);
out:
- atomic_dec(&tr->data[cpu]->disabled);
+ atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
-static void notrace __wakeup_reset(struct trace_array *tr)
+static void __wakeup_reset(struct trace_array *tr)
{
- struct trace_array_cpu *data;
- int cpu;
-
- assert_spin_locked(&wakeup_lock);
-
- for_each_possible_cpu(cpu) {
- data = tr->data[cpu];
- tracing_reset(data);
- }
-
wakeup_cpu = -1;
wakeup_prio = -1;
wakeup_task = NULL;
}
-static void notrace wakeup_reset(struct trace_array *tr)
+static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
- spin_lock_irqsave(&wakeup_lock, flags);
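+ /* reset the per-CPU buffers before taking the raw wakeup_lock */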
+ tracing_reset_online_cpus(tr);
+
+ local_irq_save(flags);
+ __raw_spin_lock(&wakeup_lock);
__wakeup_reset(tr);
- spin_unlock_irqrestore(&wakeup_lock, flags);
+ __raw_spin_unlock(&wakeup_lock);
+ local_irq_restore(flags);
}
-static notrace void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
- struct task_struct *curr)
+static void
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
+ struct trace_array_cpu *data;
int cpu = smp_processor_id();
unsigned long flags;
long disabled;
+ int pc;
- if (likely(!rt_task(p)) ||
+ if (likely(!tracer_enabled))
+ return;
+
+ tracing_record_cmdline(p);
+ tracing_record_cmdline(current);
+
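+ /* skip tasks of the wrong class or without a higher priority */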
+ if ((wakeup_rt && !rt_task(p)) ||
p->prio >= wakeup_prio ||
- p->prio >= curr->prio)
+ p->prio >= current->prio)
return;
- disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+ pc = preempt_count();
+ disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
if (unlikely(disabled != 1))
goto out;
/* interrupts should be off from try_to_wake_up */
- spin_lock(&wakeup_lock);
+ __raw_spin_lock(&wakeup_lock);
/* check for races. */
if (!tracer_enabled || p->prio >= wakeup_prio)
goto out_locked;
/* reset the trace */
- __wakeup_reset(tr);
+ __wakeup_reset(wakeup_trace);
wakeup_cpu = task_cpu(p);
+ wakeup_current_cpu = wakeup_cpu;
wakeup_prio = p->prio;
wakeup_task = p;
local_save_flags(flags);
- tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
- ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+ data = wakeup_trace->data[wakeup_cpu];
+ data->preempt_timestamp = ftrace_now(cpu);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+
+ /*
+ * We must be careful in using CALLER_ADDR2. But since wake_up
+ * is not called by an assembly function (whereas schedule is),
+ * it should be safe to use it here.
+ */
+ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
- spin_unlock(&wakeup_lock);
+ __raw_spin_unlock(&wakeup_lock);
out:
- atomic_dec(&tr->data[cpu]->disabled);
+ atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
-notrace void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
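+/* hook the sched wakeup/switch/migrate tracepoints used by this tracer */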
+static void start_wakeup_tracer(struct trace_array *tr)
{
- if (likely(!tracer_enabled))
+ int ret;
+
+ ret = register_trace_sched_wakeup(probe_wakeup);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't activate tracepoint"
+ " probe to kernel_sched_wakeup\n");
return;
+ }
- wakeup_check_start(wakeup_trace, wakee, curr);
-}
+ ret = register_trace_sched_wakeup_new(probe_wakeup);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't activate tracepoint"
+ " probe to kernel_sched_wakeup_new\n");
+ goto fail_deprobe;
+ }
-notrace void
-ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
-{
- if (likely(!tracer_enabled))
- return;
+ ret = register_trace_sched_switch(probe_wakeup_sched_switch);
+ if (ret) {
+ pr_info("sched trace: Couldn't activate tracepoint"
+ " probe to kernel_sched_switch\n");
+ goto fail_deprobe_wake_new;
+ }
- wakeup_check_start(wakeup_trace, wakee, curr);
-}
+ ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task);
+ if (ret) {
+ pr_info("wakeup trace: Couldn't activate tracepoint"
+ " probe to kernel_sched_migrate_task\n");
+ return;
+ }
-static notrace void start_wakeup_tracer(struct trace_array *tr)
-{
wakeup_reset(tr);
/*
* Don't let tracer_enabled = 1 become visible before the
* wakeup_reset() above; the smp_wmb() orders the two.
*/
smp_wmb();
- tracer_enabled = 1;
+ register_ftrace_function(&trace_ops);
+
+ if (tracing_is_enabled())
+ tracer_enabled = 1;
+ else
+ tracer_enabled = 0;
return;
+fail_deprobe_wake_new:
+ unregister_trace_sched_wakeup_new(probe_wakeup);
+fail_deprobe:
+ unregister_trace_sched_wakeup(probe_wakeup);
}
-static notrace void stop_wakeup_tracer(struct trace_array *tr)
+static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
+ unregister_ftrace_function(&trace_ops);
+ unregister_trace_sched_switch(probe_wakeup_sched_switch);
+ unregister_trace_sched_wakeup_new(probe_wakeup);
+ unregister_trace_sched_wakeup(probe_wakeup);
+ unregister_trace_sched_migrate_task(probe_wakeup_migrate_task);
}
-static notrace void wakeup_tracer_init(struct trace_array *tr)
+static int __wakeup_tracer_init(struct trace_array *tr)
{
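+ /* force latency-format output, remembering the user's previous setting */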
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
+ tracing_max_latency = 0;
wakeup_trace = tr;
+ start_wakeup_tracer(tr);
+ return 0;
+}
- if (tr->ctrl)
- start_wakeup_tracer(tr);
+static int wakeup_tracer_init(struct trace_array *tr)
+{
+ wakeup_rt = 0;
+ return __wakeup_tracer_init(tr);
}
-static notrace void wakeup_tracer_reset(struct trace_array *tr)
+static int wakeup_rt_tracer_init(struct trace_array *tr)
{
- if (tr->ctrl) {
- stop_wakeup_tracer(tr);
- /* make sure we put back any tasks we are tracing */
- wakeup_reset(tr);
- }
+ wakeup_rt = 1;
+ return __wakeup_tracer_init(tr);
}
-static void wakeup_tracer_ctrl_update(struct trace_array *tr)
+static void wakeup_tracer_reset(struct trace_array *tr)
{
- if (tr->ctrl)
- start_wakeup_tracer(tr);
- else
- stop_wakeup_tracer(tr);
+ stop_wakeup_tracer(tr);
+ /* make sure we put back any tasks we are tracing */
+ wakeup_reset(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
-static void notrace wakeup_tracer_open(struct trace_iterator *iter)
+static void wakeup_tracer_start(struct trace_array *tr)
{
- /* stop the trace while dumping */
- if (iter->tr->ctrl)
- stop_wakeup_tracer(iter->tr);
+ wakeup_reset(tr);
+ tracer_enabled = 1;
}
-static void notrace wakeup_tracer_close(struct trace_iterator *iter)
+static void wakeup_tracer_stop(struct trace_array *tr)
{
- /* forget about any processes we were recording */
- if (iter->tr->ctrl)
- start_wakeup_tracer(iter->tr);
+ tracer_enabled = 0;
}
static struct tracer wakeup_tracer __read_mostly =
{
.name = "wakeup",
.init = wakeup_tracer_init,
.reset = wakeup_tracer_reset,
- .open = wakeup_tracer_open,
- .close = wakeup_tracer_close,
- .ctrl_update = wakeup_tracer_ctrl_update,
+ .start = wakeup_tracer_start,
+ .stop = wakeup_tracer_stop,
+ .print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+#endif
+};
+
+static struct tracer wakeup_rt_tracer __read_mostly =
+{
+ .name = "wakeup_rt",
+ .init = wakeup_rt_tracer_init,
+ .reset = wakeup_tracer_reset,
+ .start = wakeup_tracer_start,
+ .stop = wakeup_tracer_stop,
+ .wait_pipe = poll_wait_pipe,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
if (ret)
return ret;
+ ret = register_tracer(&wakeup_rt_tracer);
+ if (ret)
+ return ret;
+
return 0;
}
device_initcall(init_wakeup_tracer);