/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/list.h>

#ifdef CONFIG_DYNAMIC_FTRACE
# define FTRACE_ENABLED_INIT 1
#else
# define FTRACE_ENABLED_INIT 0
#endif

int ftrace_enabled = FTRACE_ENABLED_INIT;
static int last_ftrace_enabled = FTRACE_ENABLED_INIT;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* pairs with the smp_wmb() in __register_ftrace_function() */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing. There may be lag before all CPUs see the change.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);
        return 0;
}

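/*
 * The publication above pairs with the read_barrier_depends() calls
 * in ftrace_list_func():
 *
 *	writer (this function)		reader (ftrace_list_func)
 *	------------------------	-------------------------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * Without the smp_wmb(), a CPU walking the list could observe the new
 * ops linked into ftrace_list before it observes a valid ops->next.
 */
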
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);
        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);

struct ftrace_page {
        struct ftrace_page *next;
        int index;
        struct dyn_ftrace records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

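/*
 * For a rough sense of scale (illustrative sizes, not guaranteed by
 * anything in this file): with 4096-byte pages and a struct dyn_ftrace
 * of a few dozen bytes, ENTRIES_PER_PAGE is on the order of a hundred
 * records, so NR_TO_INIT (10000) amounts to on the order of a hundred
 * pre-allocated pages at boot.
 */
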
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip)
                        return 1;
        }

        return 0;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

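/*
 * Records are keyed by hash_long(ip, FTRACE_HASHBITS) (computed in
 * ftrace_record_ip() below), so a lookup and an insert for the same
 * ip always land in the same ftrace_hash[] bucket.
 */
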
static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
{
        /* If this was already converted, skip it */
        if (ftrace_ip_converted(ip))
                return NULL;

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;

        resched = need_resched();
        preempt_disable_notrace();

        /* We simply need to protect against recursion */
        __get_cpu_var(ftrace_shutdown_disable_cpu)++;
        if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);
        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();
        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. The arch alloc is responsible
         * for seeing if the IP has already changed, and if
         * it has, the alloc will fail.
         */
        node = ftrace_alloc_shutdown_node(ip);
        if (!node)
                goto out_unlock;
        node->ip = ip;
        ftrace_add_hash(node, key);
        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        __get_cpu_var(ftrace_shutdown_disable_cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

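/*
 * Note the division of labor: ftrace_record_ip() only records a call
 * site into the hash, it never touches kernel text. The patching of
 * recorded sites happens later, in bulk, from the ftraced thread
 * under stop_machine(), which keeps this per-call path cheap.
 */
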
static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
{
        .func = ftrace_record_ip,
};

#define MCOUNT_ADDR ((long)(&mcount))

static void notrace ftrace_replace_code(int saved)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long ip;
        int failed;
        int i;

        if (saved)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        ip = rec->ip;
                        if (saved)
                                new = ftrace_call_replace(ip, MCOUNT_ADDR);
                        else
                                old = ftrace_call_replace(ip, MCOUNT_ADDR);

                        failed = ftrace_modify_code(ip, old, new);
                        if (failed)
                                rec->flags |= FTRACE_FL_FAILED;
                }
        }
}

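/*
 * The "saved" flag picks the direction of the patching:
 *
 *	saved == 1:  nop  -> call mcount	(startup, tracing on)
 *	saved == 0:  call -> nop		(shutdown, tracing off)
 *
 * ftrace_modify_code() is provided by the arch; whenever it fails,
 * the record is flagged FTRACE_FL_FAILED and skipped on all later
 * passes (see the check at the top of the inner loop).
 */
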
static notrace void ftrace_startup_code(void)
{
        ftrace_replace_code(1);
}

static notrace void ftrace_shutdown_code(void)
{
        ftrace_replace_code(0);
}

static notrace void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int notrace __ftrace_modify_code(void *data)
{
        void (*func)(void) = data;

        func();
        return 0;
}

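/*
 * __ftrace_modify_code() is the trampoline handed to
 * stop_machine_run(): "data" smuggles in which patching pass to run
 * (ftrace_startup_code or ftrace_shutdown_code) while every other CPU
 * spins with interrupts off, so no CPU can be executing the
 * instructions being rewritten.
 */
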
static void notrace
ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, addr);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static void notrace ftrace_run_startup_code(void)
{
        stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
}

static void notrace ftrace_run_shutdown_code(void)
{
        stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
}

static void notrace ftrace_startup(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend != 1)
                goto out;
        __unregister_ftrace_function(&ftrace_shutdown_ops);

        if (ftrace_enabled)
                ftrace_run_startup_code();
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (ftraced_suspend)
                goto out;

        if (ftrace_enabled)
                ftrace_run_shutdown_code();

        __register_ftrace_function(&ftrace_shutdown_ops);
 out:
        mutex_unlock(&ftraced_lock);
}

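/*
 * ftraced_suspend acts as a reference count of active tracers: the
 * first registration patches all known call sites in and takes the
 * recording ops off duty; the last unregistration patches them back
 * out and re-registers ftrace_shutdown_ops so that new call sites
 * keep being discovered while tracing is off.
 */
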
static void notrace ftrace_startup_sysctl(void)
{
        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                ftrace_run_startup_code();
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                ftrace_run_shutdown_code();
        mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        cycle_t start, stop;
        int i;

        /* Don't be calling ftrace ops now */
        __unregister_ftrace_function(&ftrace_shutdown_ops);

        start = now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p, MCOUNT_ADDR);
                        ftrace_update_cnt++;
                }
        }

        stop = now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        __register_ftrace_function(&ftrace_shutdown_ops);

        return 0;
}

static void notrace ftrace_update_code(void)
{
        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

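/*
 * Unlike ftrace_run_startup_code()/ftrace_run_shutdown_code(), which
 * re-patch sites already sitting in the record pages,
 * ftrace_update_code() drains freshly recorded sites: it empties each
 * hash bucket and nops out every new entry while the machine is
 * stopped, which is why no lock on the hash is needed.
 */
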
static int notrace ftraced(void *ignore)
{
        unsigned long usecs;

        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                /* check once a second */
                schedule_timeout(HZ);

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_update_tot_cnt = 0;
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

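/*
 * The ftraced daemon is the slow half of the recording scheme: once a
 * second it checks whether ftrace_record_ip() queued new call sites
 * (ftraced_trigger) and, if no tracer is active, converts them all to
 * nops via stop_machine(). It also tops up the record pages with
 * ftrace_shutdown_replenish(), since a GFP_KERNEL allocation is not
 * safe from mcount context where sites are recorded.
 */
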
static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;
        int ret;

        ret = ftrace_dyn_arch_init();
        if (ret)
                return ret;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         * final linking to find all calls to ftrace.
         * Then we can:
         *  a) know how many pages to allocate.
         *     and/or
         *  b) set up the table then.
         *
         * The dynamic code is still necessary for modules.
         */
        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;
                pg = pg->next;
        }

        return 0;
}

static int __init notrace ftrace_shutdown_init(void)
{
        struct task_struct *p;
        int ret;

        ret = ftrace_dyn_table_alloc();
        if (ret)
                return ret;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p))
                return -1;

        __register_ftrace_function(&ftrace_shutdown_ops);

        return 0;
}

core_initcall(ftrace_shutdown_init);

#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()	  do { } while (0)
# define ftrace_shutdown()	  do { } while (0)
# define ftrace_startup_sysctl()  do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ftrace_startup();

        ret = __register_ftrace_function(ops);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

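/*
 * Example usage (a sketch only; "my_trace", "my_ops" and
 * "my_hit_count" are made-up names, not defined in this file):
 *
 *	static atomic_t my_hit_count;
 *
 *	static void notrace my_trace(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
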
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);

        /* If we removed the last tracer, turn the call sites back off */
        if (ftrace_list == &ftrace_list_end)
                ftrace_shutdown();

        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *filp, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {
                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }
        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}

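/*
 * This handler backs the ftrace_enabled sysctl (the table entry lives
 * in kernel/sysctl.c), so tracing can be toggled at runtime:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	(stop the calls)
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	(start them again)
 */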