/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/list.h>
static DEFINE_SPINLOCK(ftrace_lock);

/* List terminator: its stub func lets the call chain end harmlessly */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
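/*
 * With 'gcc -pg', every function entry begins with a call to mcount.
 * The arch assembly stub loads ftrace_trace_function and, unless it
 * still points at ftrace_stub, calls it with the instrumented
 * function's address and its caller's address. Roughly, as a C
 * sketch (the real stub is assembly; this function does not exist):
 *
 *	void mcount(unsigned long ip, unsigned long parent_ip)
 *	{
 *		if (ftrace_trace_function != ftrace_stub)
 *			ftrace_trace_function(ip, parent_ip);
 *	}
 */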
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* order the load of op->next against the publish in
		 * __register_ftrace_function() (alpha again) */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before every CPU stops
 * calling the previously registered function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	/*
	 * For one func, simply call it directly.
	 * For more than one func, call the chain.
	 */
	if (ops->next == &ftrace_list_end)
		ftrace_trace_function = ops->func;
	else
		ftrace_trace_function = ftrace_list_func;

	spin_unlock(&ftrace_lock);

	return 0;
}
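/*
 * A minimal sketch of the ordering contract between the writer above
 * and the reader in ftrace_list_func() (column layout illustrative):
 *
 *	writer (register)		reader (ftrace_list_func)
 *	-----------------		-------------------------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * smp_wmb() guarantees ops->next is visible before ops itself is
 * published; read_barrier_depends() (a no-op on everything but alpha)
 * orders the dependent loads on the reader's side, so the lockless
 * walk never sees a half-initialized entry.
 */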
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* Should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;
	if (*p != ops) {
		ret = -1;		/* ops was never registered */
		goto out;
	}
	*p = (*p)->next;		/* unlink ops from the list */

	/* If we only have one func left, then call that directly */
	if (ftrace_list == &ftrace_list_end ||
	    ftrace_list->next == &ftrace_list_end)
		ftrace_trace_function = ftrace_list->func;

 out:
	spin_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;
static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip)
			return 1;
	}
	return 0;
}
static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
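/*
 * How the two helpers above combine, mirroring the calls made in
 * ftrace_record_ip() below (a sketch, not additional code):
 *
 *	unsigned long key = hash_long(ip, FTRACE_HASHBITS);
 *
 *	if (!ftrace_ip_in_hash(ip, key))
 *		ftrace_add_hash(node, key);
 *
 * hash_long() from <linux/hash.h> folds the instruction pointer down
 * to FTRACE_HASHBITS bits, so key indexes ftrace_hash[] directly.
 */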
static void notrace
ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race where the ftraced kthread updates the
	 * hash and resets it before we get here. The arch-specific
	 * alloc is responsible for checking whether the IP has already
	 * changed; if it has, the alloc fails.
	 */
	node = ftrace_alloc_shutdown_node(ip);
	if (!node)
		goto out_unlock;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with the scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
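/*
 * The per-cpu counter above is the recursion guard: the functions
 * ftrace_record_ip() calls are themselves compiled with -pg, so a
 * nested mcount entry on the same CPU finds the counter already
 * above one and records nothing. In miniature:
 *
 *	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
 *	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
 *		goto out;		(nested call: bail out)
 *	... record the ip ...
 *  out:
 *	__get_cpu_var(ftrace_shutdown_disable_cpu)--;
 */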
static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
{
	.func = ftrace_record_ip,
};
static int notrace __ftrace_modify_code(void *data)
{
	void (*func)(void) = data;

	func();
	return 0;
}

static void notrace ftrace_run_startup_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
}

static void notrace ftrace_run_shutdown_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
}
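/*
 * stop_machine_run(fn, data, NR_CPUS) runs fn(data) while every other
 * CPU spins with interrupts disabled, so the text patching done by the
 * arch's ftrace_startup_code()/ftrace_shutdown_code() cannot race with
 * a CPU executing the very instructions being rewritten. The same
 * pattern in isolation (patch_all_sites is an illustrative name):
 *
 *	static int patch_all_sites(void *data)
 *	{
 *		(no other CPU runs until this returns)
 *		return 0;
 *	}
 *
 *	stop_machine_run(patch_all_sites, NULL, NR_CPUS);
 */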
static void notrace ftrace_startup(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend != 1)
		goto out;
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	ftrace_run_startup_code();
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (ftraced_suspend)
		goto out;

	ftrace_run_shutdown_code();

	__register_ftrace_function(&ftrace_shutdown_ops);
 out:
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	cycle_t start, stop;
	int i;

	/* Don't be calling ftrace ops now */
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUs are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftraced_lock);
		if (ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				/* reset only after printing the total */
				ftrace_update_tot_cnt = 0;
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init notrace ftrace_shutdown_init(void)
{
	struct task_struct *p;
	int ret;

	ret = ftrace_shutdown_arch_init();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}

core_initcall(ftrace_shutdown_init);
#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()	do { } while (0)
# define ftrace_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	ftrace_startup();

	return __register_ftrace_function(ops);
}
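/*
 * A minimal usage sketch (the callback and ops names are illustrative,
 * not part of this file); every function the handler itself calls must
 * also be notrace, or mcount will recurse:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */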
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	ret = __unregister_ftrace_function(ops);

	/* When the last function is removed, disarm the call sites */
	if (ftrace_list == &ftrace_list_end)
		ftrace_shutdown();

	return ret;
}
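/*
 * Continuing the sketch after register_ftrace_function(): tearing the
 * illustrative my_ops down is the mirror image, and once the list is
 * empty ftrace_shutdown() re-arms the recording hooks:
 *
 *	unregister_ftrace_function(&my_ops);
 */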