1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Return nonzero when @entry carries a recognized trace entry type.
 * (Selection logic is elided in this chunk — body not visible.)
 */
static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * Drain every event queued on @cpu of @tr's ring buffer, sanity-checking
 * each entry as it is consumed.  Used by the startup selftests to detect
 * buffer corruption.  (Several lines of this function are elided in this
 * chunk: the opening brace, the error path, and the return.)
 */
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	/* consume events one at a time until the per-cpu buffer is empty */
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/* an unrecognized entry type means the buffer is corrupted */
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
	/* reached on the failure path: report the corruption */
	printk(KERN_CONT ".. corrupted trace buffer .. ");
 * Test the trace buffer to see if all the elements
 * are still sane.  On success the total entry count is reported via
 * @count (presumably — the store is elided in this chunk; confirm).
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
	unsigned long flags, cnt = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/* validate every possible cpu's portion of the buffer */
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);

	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);
74 #ifdef CONFIG_FUNCTION_TRACER
76 #ifdef CONFIG_DYNAMIC_FTRACE
79 #define STR(x) __STR(x)
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Filters tracing down to DYN_FTRACE_TEST_NAME only, verifies the buffer
 * stays empty for other functions, then confirms exactly one entry after
 * calling the marker function.  Saved ftrace/tracer enable state is
 * restored before returning.  (Many lines are elided in this chunk.)
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */

	/* passed in by parameter to fool gcc from optimizing */

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them (glob-match the suffix).
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* Sleep for a 1/10 of a second */

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);

		printk(KERN_CONT ".. filter did not filter .. ");

	/* call our function again */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);

	/* restore the enable state saved on entry */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);
162 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
163 #endif /* CONFIG_DYNAMIC_FTRACE */
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 * (Several lines are elided in this chunk, including the return type
 * line and most statements.)
 */
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */

	/* start the tracing */

	/* Sleep for a 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* an empty buffer after tracing means the test failed */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* chain into the dynamic-ftrace sub-test using our marker function */
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

	/* restore the enable state saved on entry */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
215 #endif /* CONFIG_FUNCTION_TRACER */
217 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Test the irqsoff tracer: record a short interrupts-disabled section and
 * verify both the live buffer and the max-latency snapshot buffer contain
 * sane entries.  tracing_max_latency is saved/restored around the test.
 * (Most statements are elided in this chunk.)
 */
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/* start the tracing */

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable interrupts for a bit */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* an empty max buffer means no latency was recorded — failure */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* restore the max latency saved on entry */
	tracing_max_latency = save_max;
251 #endif /* CONFIG_IRQSOFF_TRACER */
253 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Test the preemptoff tracer: record a short preemption-disabled section
 * and verify both trace buffers.  Forced to pass when called with
 * preemption already disabled (see BKL note below).
 * (Most statements are elided in this chunk.)
 */
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");

	/* start the tracing */

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption for a bit */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* an empty max buffer means no latency was recorded — failure */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* restore the max latency saved on entry */
	tracing_max_latency = save_max;
300 #endif /* CONFIG_PREEMPT_TRACER */
302 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Combined test for the preemptirqsoff tracer: runs the critical section
 * twice, once disabling preemption before irqs and once in the reverse
 * order, checking both trace buffers after each pass.  Forced to pass
 * when called with preemption already disabled (see BKL note below).
 * (Most statements are elided in this chunk.)
 */
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");

	/* start the tracing */

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */

	/* reverse the order of preempt vs irqs */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* first pass must have produced entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;

	/* reverse the order of preempt vs irqs */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* second pass must have produced entries too */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* restore the max latency saved on entry */
	tracing_max_latency = save_max;
391 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
393 #ifdef CONFIG_NOP_TRACER
/* The nop tracer does nothing, so its selftest trivially succeeds. */
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
	/* What could possibly go wrong? */
402 #ifdef CONFIG_SCHED_TRACER
403 static int trace_wakeup_test_thread(void *data)
405 /* Make this a RT thread, doesn't need to be too high */
406 struct sched_param param = { .sched_priority = 5 };
407 struct completion *x = data;
409 sched_setscheduler(current, SCHED_FIFO, ¶m);
411 /* Make it know we have a new prio */
414 /* now go to sleep and let the test wake us up */
415 set_current_state(TASK_INTERRUPTIBLE);
418 /* we are awake, now wait to disappear */
419 while (!kthread_should_stop()) {
421 * This is an RT task, do short sleeps to let
/*
 * Test the wakeup-latency tracer: spawn an RT kthread, wait until it has
 * reached RT priority and gone to sleep, wake it with tracing active, and
 * verify both the live and max-latency buffers.
 * (Most statements are elided in this chunk.)
 */
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	/* give a little time to let the thread wake up */

	/* stop the tracing. */

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* restore the max latency saved on entry */
	tracing_max_latency = save_max;

	/* kill the thread */

	/* a wakeup with tracing on must have produced entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
498 #endif /* CONFIG_SCHED_TRACER */
500 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Test the context-switch tracer: start tracing, sleep briefly so
 * switches get recorded, then verify the buffer is sane and non-empty.
 * (Several lines are elided in this chunk.)
 */
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
	/* start the tracing */

	/* Sleep for a 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* sleeping must have produced at least one context switch */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
525 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
527 #ifdef CONFIG_SYSPROF_TRACER
/*
 * Test the sysprof tracer: start tracing, sleep briefly, then check the
 * buffer for sanity.  Note: unlike the other selftests, no non-empty
 * check is visible here.  (Several lines are elided in this chunk.)
 */
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
	/* start the tracing */

	/* Sleep for a 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
547 #endif /* CONFIG_SYSPROF_TRACER */
549 #ifdef CONFIG_BRANCH_TRACER
/*
 * Test the branch tracer: start tracing, sleep briefly, then check the
 * buffer for sanity.  Note: unlike most other selftests, no non-empty
 * check is visible here.  (Several lines are elided in this chunk.)
 */
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
	/* start the tracing */

	/* Sleep for a 1/10 of a second */

	/* stop the tracing. */

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
569 #endif /* CONFIG_BRANCH_TRACER */