/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"
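/*
 * This file implements the "wakeup" latency tracer: it measures how long
 * the highest-priority runnable task has to wait between its wakeup and
 * the moment the scheduler actually switches to it. A minimal usage
 * sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo wakeup > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 */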
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
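/*
 * wakeup_task is the task whose wakeup-to-schedule latency is currently
 * being measured; wakeup_cpu and wakeup_prio cache the CPU it was woken
 * on and its priority at wakeup time. All three are protected by
 * wakeup_lock, which is always taken with interrupts disabled.
 */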
static void __wakeup_reset(struct trace_array *tr);
#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags, pc);

unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

out:
	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
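/*
 * Note the ordering of the fast path above: the unlocked wakeup_task
 * check lets almost every function-trace event bail out before touching
 * the per-cpu disabled counter or wakeup_lock, so function tracing stays
 * cheap while no wakeup is being measured.
 */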
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
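/*
 * In other words: with a nonzero tracing_thresh every latency at or
 * above the threshold is recorded, while with tracing_thresh == 0 only
 * a new maximum (delta > tracing_max_latency) is recorded.
 */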
static void
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);
	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
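/*
 * The latency recorded above is simply T1 - T0: data->preempt_timestamp
 * (T0) is stamped in probe_wakeup() when the task is woken, and T1 is
 * read here, at the sched_switch that finally runs it.
 */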
static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(tr, cpu);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
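/*
 * wakeup_reset() is the locking wrapper: __wakeup_reset() assumes that
 * wakeup_lock is already held (its other callers take it themselves),
 * so it can be shared by the probes and this reset path.
 */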
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
				   flags, pc);
	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
		       flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
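/*
 * get_task_struct() above pins the woken task, so wakeup_task cannot be
 * freed while the trace is in flight; the matching put_task_struct()
 * happens in __wakeup_reset() when the trace is finished or restarted.
 */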
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}
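/*
 * The fail_* labels unwind the tracepoint probes in reverse order of
 * registration, so a partial failure leaves no probe behind.
 */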
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}
static int __wakeup_tracer_init(struct trace_array *tr)
{
	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);
}
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}
static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};
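/*
 * The only difference between the two tracers is the wakeup_rt flag set
 * at init time: "wakeup" considers any higher-priority wakeup, while
 * "wakeup_rt" restricts tracing to real-time tasks via the rt_task(p)
 * check in probe_wakeup().
 */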
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);