2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
32 #include <trace/events/sched.h>
34 #include <asm/ftrace.h>
35 #include <asm/setup.h>
37 #include "trace_output.h"
38 #include "trace_stat.h"
40 #define FTRACE_WARN_ON(cond) \
46 #define FTRACE_WARN_ON_ONCE(cond) \
48 if (WARN_ON_ONCE(cond)) \
52 /* hash bits for specific function selection */
53 #define FTRACE_HASH_BITS 7
54 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
56 /* ftrace_enabled is a method to turn ftrace on or off */
57 int ftrace_enabled __read_mostly;
58 static int last_ftrace_enabled;
60 /* Quick disabling of function tracer. */
61 int function_trace_stop;
64 * ftrace_disabled is set when an anomaly is discovered.
65 * ftrace_disabled is much stronger than ftrace_enabled.
67 static int ftrace_disabled __read_mostly;
69 static DEFINE_MUTEX(ftrace_lock);
71 static struct ftrace_ops ftrace_list_end __read_mostly =
76 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
77 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
79 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
81 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
83 struct ftrace_ops *op = ftrace_list;
85 /* in case someone actually ports this to alpha! */
86 read_barrier_depends();
88 while (op != &ftrace_list_end) {
90 read_barrier_depends();
91 op->func(ip, parent_ip);
96 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
98 if (!test_tsk_trace_trace(current))
101 ftrace_pid_function(ip, parent_ip);
104 static void set_ftrace_pid_function(ftrace_func_t func)
106 /* do not set ftrace_pid_function to itself! */
107 if (func != ftrace_pid_func)
108 ftrace_pid_function = func;
112 * clear_ftrace_function - reset the ftrace function
114 * This NULLs the ftrace function and in essence stops
115 * tracing. There may be lag before the change is actually visible to all callers.
117 void clear_ftrace_function(void)
119 ftrace_trace_function = ftrace_stub;
120 __ftrace_trace_function = ftrace_stub;
121 ftrace_pid_function = ftrace_stub;
124 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
126 * For those archs that do not test function_trace_stop in their
127 * mcount call site, we need to do it from C.
129 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
131 if (function_trace_stop)
134 __ftrace_trace_function(ip, parent_ip);
138 static int __register_ftrace_function(struct ftrace_ops *ops)
140 ops->next = ftrace_list;
142 * We are entering ops into the ftrace_list but another
143 * CPU might be walking that list. We need to make sure
144 * the ops->next pointer is valid before another CPU sees
145 * the ops pointer included into the ftrace_list.
150 if (ftrace_enabled) {
153 if (ops->next == &ftrace_list_end)
156 func = ftrace_list_func;
158 if (ftrace_pid_trace) {
159 set_ftrace_pid_function(func);
160 func = ftrace_pid_func;
164 * For one func, simply call it directly.
165 * For more than one func, call the chain.
167 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
168 ftrace_trace_function = func;
170 __ftrace_trace_function = func;
171 ftrace_trace_function = ftrace_test_stop_func;
178 static int __unregister_ftrace_function(struct ftrace_ops *ops)
180 struct ftrace_ops **p;
183 * If we are removing the last function, then simply point
184 * to the ftrace_stub.
186 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
187 ftrace_trace_function = ftrace_stub;
188 ftrace_list = &ftrace_list_end;
192 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
201 if (ftrace_enabled) {
202 /* If we only have one func left, then call that directly */
203 if (ftrace_list->next == &ftrace_list_end) {
204 ftrace_func_t func = ftrace_list->func;
206 if (ftrace_pid_trace) {
207 set_ftrace_pid_function(func);
208 func = ftrace_pid_func;
210 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211 ftrace_trace_function = func;
213 __ftrace_trace_function = func;
221 static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub)
228 func = ftrace_trace_function;
230 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func);
232 func = ftrace_pid_func;
234 if (func == ftrace_pid_func)
235 func = ftrace_pid_function;
238 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
239 ftrace_trace_function = func;
241 __ftrace_trace_function = func;
245 #ifdef CONFIG_FUNCTION_PROFILER
246 struct ftrace_profile {
247 struct hlist_node node;
249 unsigned long counter;
250 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
251 unsigned long long time;
255 struct ftrace_profile_page {
256 struct ftrace_profile_page *next;
258 struct ftrace_profile records[];
261 struct ftrace_profile_stat {
263 struct hlist_head *hash;
264 struct ftrace_profile_page *pages;
265 struct ftrace_profile_page *start;
266 struct tracer_stat stat;
269 #define PROFILE_RECORDS_SIZE \
270 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
272 #define PROFILES_PER_PAGE \
273 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
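/*
 * Rough illustration (sizes are config-dependent, so treat this as a
 * sketch): with 4096-byte pages and a struct ftrace_profile of roughly
 * 40 bytes on 64-bit with the graph tracer enabled, each
 * ftrace_profile_page holds on the order of a hundred records.
 */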
275 static int ftrace_profile_bits __read_mostly;
276 static int ftrace_profile_enabled __read_mostly;
278 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
279 static DEFINE_MUTEX(ftrace_profile_lock);
281 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
283 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
286 function_stat_next(void *v, int idx)
288 struct ftrace_profile *rec = v;
289 struct ftrace_profile_page *pg;
291 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
297 if ((void *)rec >= (void *)&pg->records[pg->index]) {
301 rec = &pg->records[0];
309 static void *function_stat_start(struct tracer_stat *trace)
311 struct ftrace_profile_stat *stat =
312 container_of(trace, struct ftrace_profile_stat, stat);
314 if (!stat || !stat->start)
317 return function_stat_next(&stat->start->records[0], 0);
320 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
321 /* function graph compares on total time */
322 static int function_stat_cmp(void *p1, void *p2)
324 struct ftrace_profile *a = p1;
325 struct ftrace_profile *b = p2;
327 if (a->time < b->time)
329 if (a->time > b->time)
335 /* without function graph, compare against the hit counter */
336 static int function_stat_cmp(void *p1, void *p2)
338 struct ftrace_profile *a = p1;
339 struct ftrace_profile *b = p2;
341 if (a->counter < b->counter)
343 if (a->counter > b->counter)
350 static int function_stat_headers(struct seq_file *m)
352 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
353 seq_printf(m, " Function "
358 seq_printf(m, " Function Hit\n"
364 static int function_stat_show(struct seq_file *m, void *v)
366 struct ftrace_profile *rec = v;
367 char str[KSYM_SYMBOL_LEN];
368 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
369 static DEFINE_MUTEX(mutex);
370 static struct trace_seq s;
371 unsigned long long avg;
374 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
375 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
377 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
380 do_div(avg, rec->counter);
384 trace_print_graph_duration(rec->time, &s);
385 trace_seq_puts(&s, " ");
386 trace_print_graph_duration(avg, &s);
387 trace_print_seq(m, &s);
388 mutex_unlock(&mutex);
395 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
397 struct ftrace_profile_page *pg;
399 pg = stat->pages = stat->start;
402 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
407 memset(stat->hash, 0,
408 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
411 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
413 struct ftrace_profile_page *pg;
418 /* If we already allocated, do nothing */
422 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
426 #ifdef CONFIG_DYNAMIC_FTRACE
427 functions = ftrace_update_tot_cnt;
430 * We do not know the number of functions that exist because
431 * dynamic tracing is what counts them. From past experience
432 * we have around 20K functions. That should be more than enough.
433 * It is highly unlikely we will execute every function in
434 * the kernel.
439 pg = stat->start = stat->pages;
441 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
443 for (i = 0; i < pages; i++) {
444 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
455 unsigned long tmp = (unsigned long)pg;
461 free_page((unsigned long)stat->pages);
468 static int ftrace_profile_init_cpu(int cpu)
470 struct ftrace_profile_stat *stat;
473 stat = &per_cpu(ftrace_profile_stats, cpu);
476 /* If the profile is already created, simply reset it */
477 ftrace_profile_reset(stat);
482 * We are profiling all functions, but usually only a few thousand
483 * functions are hit. We'll make a hash of 1024 items.
485 size = FTRACE_PROFILE_HASH_SIZE;
487 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
492 if (!ftrace_profile_bits) {
493 size--;
495 for (; size; size >>= 1)
496 ftrace_profile_bits++;
499 /* Preallocate the function profiling pages */
500 if (ftrace_profile_pages_init(stat) < 0) {
509 static int ftrace_profile_init(void)
514 for_each_online_cpu(cpu) {
515 ret = ftrace_profile_init_cpu(cpu);
523 /* interrupts must be disabled */
524 static struct ftrace_profile *
525 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
527 struct ftrace_profile *rec;
528 struct hlist_head *hhd;
529 struct hlist_node *n;
532 key = hash_long(ip, ftrace_profile_bits);
533 hhd = &stat->hash[key];
535 if (hlist_empty(hhd))
538 hlist_for_each_entry_rcu(rec, n, hhd, node) {
546 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
547 struct ftrace_profile *rec)
551 key = hash_long(rec->ip, ftrace_profile_bits);
552 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
556 * The memory is already allocated, this simply finds a new record to use.
558 static struct ftrace_profile *
559 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
561 struct ftrace_profile *rec = NULL;
563 /* prevent recursion (from NMIs) */
564 if (atomic_inc_return(&stat->disabled) != 1)
568 * Try to find the function again since an NMI
569 * could have added it
571 rec = ftrace_find_profiled_func(stat, ip);
575 if (stat->pages->index == PROFILES_PER_PAGE) {
576 if (!stat->pages->next)
578 stat->pages = stat->pages->next;
581 rec = &stat->pages->records[stat->pages->index++];
583 ftrace_add_profile(stat, rec);
586 atomic_dec(&stat->disabled);
592 function_profile_call(unsigned long ip, unsigned long parent_ip)
594 struct ftrace_profile_stat *stat;
595 struct ftrace_profile *rec;
598 if (!ftrace_profile_enabled)
601 local_irq_save(flags);
603 stat = &__get_cpu_var(ftrace_profile_stats);
604 if (!stat->hash || !ftrace_profile_enabled)
607 rec = ftrace_find_profiled_func(stat, ip);
609 rec = ftrace_profile_alloc(stat, ip);
616 local_irq_restore(flags);
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
620 static int profile_graph_entry(struct ftrace_graph_ent *trace)
622 function_profile_call(trace->func, 0);
626 static void profile_graph_return(struct ftrace_graph_ret *trace)
628 struct ftrace_profile_stat *stat;
629 unsigned long long calltime;
630 struct ftrace_profile *rec;
633 local_irq_save(flags);
634 stat = &__get_cpu_var(ftrace_profile_stats);
635 if (!stat->hash || !ftrace_profile_enabled)
638 calltime = trace->rettime - trace->calltime;
640 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
643 index = trace->depth;
645 /* Append this call time to the parent time to subtract */
647 current->ret_stack[index - 1].subtime += calltime;
649 if (current->ret_stack[index].subtime < calltime)
650 calltime -= current->ret_stack[index].subtime;
655 rec = ftrace_find_profiled_func(stat, trace->func);
657 rec->time += calltime;
660 local_irq_restore(flags);
663 static int register_ftrace_profiler(void)
665 return register_ftrace_graph(&profile_graph_return,
666 &profile_graph_entry);
669 static void unregister_ftrace_profiler(void)
671 unregister_ftrace_graph();
674 static struct ftrace_ops ftrace_profile_ops __read_mostly =
676 .func = function_profile_call,
679 static int register_ftrace_profiler(void)
681 return register_ftrace_function(&ftrace_profile_ops);
684 static void unregister_ftrace_profiler(void)
686 unregister_ftrace_function(&ftrace_profile_ops);
688 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
691 ftrace_profile_write(struct file *filp, const char __user *ubuf,
692 size_t cnt, loff_t *ppos)
695 char buf[64]; /* big enough to hold a number */
698 if (cnt >= sizeof(buf))
701 if (copy_from_user(&buf, ubuf, cnt))
706 ret = strict_strtoul(buf, 10, &val);
712 mutex_lock(&ftrace_profile_lock);
713 if (ftrace_profile_enabled ^ val) {
715 ret = ftrace_profile_init();
721 ret = register_ftrace_profiler();
726 ftrace_profile_enabled = 1;
728 ftrace_profile_enabled = 0;
730 * unregister_ftrace_profiler calls stop_machine
731 * so this acts like a synchronize_sched().
733 unregister_ftrace_profiler();
737 mutex_unlock(&ftrace_profile_lock);
745 ftrace_profile_read(struct file *filp, char __user *ubuf,
746 size_t cnt, loff_t *ppos)
748 char buf[64]; /* big enough to hold a number */
751 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
752 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
755 static const struct file_operations ftrace_profile_fops = {
756 .open = tracing_open_generic,
757 .read = ftrace_profile_read,
758 .write = ftrace_profile_write,
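/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * The per-cpu "function<N>" stat files are registered in
 * ftrace_profile_debugfs() below.
 */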
761 /* used to initialize the real stat files */
762 static struct tracer_stat function_stats __initdata = {
764 .stat_start = function_stat_start,
765 .stat_next = function_stat_next,
766 .stat_cmp = function_stat_cmp,
767 .stat_headers = function_stat_headers,
768 .stat_show = function_stat_show
771 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
773 struct ftrace_profile_stat *stat;
774 struct dentry *entry;
779 for_each_possible_cpu(cpu) {
780 stat = &per_cpu(ftrace_profile_stats, cpu);
782 /* allocate enough for function name + cpu number */
783 name = kmalloc(32, GFP_KERNEL);
786 * The files created are permanent; even on a later failure
787 * we still do not free this memory.
790 "Could not allocate stat file for cpu %d\n",
794 stat->stat = function_stats;
795 snprintf(name, 32, "function%d", cpu);
796 stat->stat.name = name;
797 ret = register_stat_tracer(&stat->stat);
800 "Could not register function stat for cpu %d\n",
807 entry = debugfs_create_file("function_profile_enabled", 0644,
808 d_tracer, NULL, &ftrace_profile_fops);
810 pr_warning("Could not create debugfs "
811 "'function_profile_enabled' entry\n");
814 #else /* CONFIG_FUNCTION_PROFILER */
815 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
818 #endif /* CONFIG_FUNCTION_PROFILER */
820 /* set when tracing only a pid */
821 struct pid *ftrace_pid_trace;
822 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
824 #ifdef CONFIG_DYNAMIC_FTRACE
826 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
827 # error Dynamic ftrace depends on MCOUNT_RECORD
830 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
832 struct ftrace_func_probe {
833 struct hlist_node node;
834 struct ftrace_probe_ops *ops;
842 FTRACE_ENABLE_CALLS = (1 << 0),
843 FTRACE_DISABLE_CALLS = (1 << 1),
844 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
845 FTRACE_ENABLE_MCOUNT = (1 << 3),
846 FTRACE_DISABLE_MCOUNT = (1 << 4),
847 FTRACE_START_FUNC_RET = (1 << 5),
848 FTRACE_STOP_FUNC_RET = (1 << 6),
851 static int ftrace_filtered;
853 static struct dyn_ftrace *ftrace_new_addrs;
855 static DEFINE_MUTEX(ftrace_regex_lock);
858 struct ftrace_page *next;
860 struct dyn_ftrace records[];
863 #define ENTRIES_PER_PAGE \
864 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
866 /* estimate from running different kernels */
867 #define NR_TO_INIT 10000
869 static struct ftrace_page *ftrace_pages_start;
870 static struct ftrace_page *ftrace_pages;
872 static struct dyn_ftrace *ftrace_free_records;
875 * This is a double for loop. Do not use 'break' to break out of it;
876 * you must use a goto.
878 #define do_for_each_ftrace_rec(pg, rec) \
879 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
881 for (_____i = 0; _____i < pg->index; _____i++) { \
882 rec = &pg->records[_____i];
884 #define while_for_each_ftrace_rec() \
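/*
 * A typical use of the pair, as seen in ftrace_replace_code() and
 * others below:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */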
888 #ifdef CONFIG_KPROBES
890 static int frozen_record_count;
892 static inline void freeze_record(struct dyn_ftrace *rec)
894 if (!(rec->flags & FTRACE_FL_FROZEN)) {
895 rec->flags |= FTRACE_FL_FROZEN;
896 frozen_record_count++;
900 static inline void unfreeze_record(struct dyn_ftrace *rec)
902 if (rec->flags & FTRACE_FL_FROZEN) {
903 rec->flags &= ~FTRACE_FL_FROZEN;
904 frozen_record_count--;
908 static inline int record_frozen(struct dyn_ftrace *rec)
910 return rec->flags & FTRACE_FL_FROZEN;
913 # define freeze_record(rec) ({ 0; })
914 # define unfreeze_record(rec) ({ 0; })
915 # define record_frozen(rec) ({ 0; })
916 #endif /* CONFIG_KPROBES */
918 static void ftrace_free_rec(struct dyn_ftrace *rec)
920 rec->freelist = ftrace_free_records;
921 ftrace_free_records = rec;
922 rec->flags |= FTRACE_FL_FREE;
925 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
927 struct dyn_ftrace *rec;
929 /* First check for freed records */
930 if (ftrace_free_records) {
931 rec = ftrace_free_records;
933 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
934 FTRACE_WARN_ON_ONCE(1);
935 ftrace_free_records = NULL;
939 ftrace_free_records = rec->freelist;
940 memset(rec, 0, sizeof(*rec));
944 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
945 if (!ftrace_pages->next) {
946 /* allocate another page */
948 (void *)get_zeroed_page(GFP_KERNEL);
949 if (!ftrace_pages->next)
952 ftrace_pages = ftrace_pages->next;
955 return &ftrace_pages->records[ftrace_pages->index++];
958 static struct dyn_ftrace *
959 ftrace_record_ip(unsigned long ip)
961 struct dyn_ftrace *rec;
966 rec = ftrace_alloc_dyn_node(ip);
971 rec->newlist = ftrace_new_addrs;
972 ftrace_new_addrs = rec;
977 static void print_ip_ins(const char *fmt, unsigned char *p)
981 printk(KERN_CONT "%s", fmt);
983 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
984 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
987 static void ftrace_bug(int failed, unsigned long ip)
991 FTRACE_WARN_ON_ONCE(1);
992 pr_info("ftrace faulted on modifying ");
996 FTRACE_WARN_ON_ONCE(1);
997 pr_info("ftrace failed to modify ");
999 print_ip_ins(" actual: ", (unsigned char *)ip);
1000 printk(KERN_CONT "\n");
1003 FTRACE_WARN_ON_ONCE(1);
1004 pr_info("ftrace faulted on writing ");
1008 FTRACE_WARN_ON_ONCE(1);
1009 pr_info("ftrace faulted on unknown error ");
1016 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1018 unsigned long ftrace_addr;
1019 unsigned long flag = 0UL;
1021 ftrace_addr = (unsigned long)FTRACE_ADDR;
1024 * If this record is not to be traced or we want to disable it,
1025 * then disable it.
1027 * If we want to enable it and filtering is off, then enable it.
1029 * If we want to enable it and filtering is on, enable it only if
1030 * it is filtered on.
1032 if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1033 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1034 flag = FTRACE_FL_ENABLED;
1037 /* If the state of this record hasn't changed, then do nothing */
1038 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1042 rec->flags |= FTRACE_FL_ENABLED;
1043 return ftrace_make_call(rec, ftrace_addr);
1046 rec->flags &= ~FTRACE_FL_ENABLED;
1047 return ftrace_make_nop(NULL, rec, ftrace_addr);
1050 static void ftrace_replace_code(int enable)
1052 struct dyn_ftrace *rec;
1053 struct ftrace_page *pg;
1056 do_for_each_ftrace_rec(pg, rec) {
1058 * Skip over free records, records that have failed,
1059 * and records that have not been converted.
1061 if (rec->flags & FTRACE_FL_FREE ||
1062 rec->flags & FTRACE_FL_FAILED ||
1063 !(rec->flags & FTRACE_FL_CONVERTED))
1066 /* ignore updates to this record's mcount site */
1067 if (get_kprobe((void *)rec->ip)) {
1071 unfreeze_record(rec);
1074 failed = __ftrace_replace_code(rec, enable);
1076 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) ||
1078 !core_kernel_text(rec->ip)) {
1079 ftrace_free_rec(rec);
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1086 } while_for_each_ftrace_rec();
1090 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1097 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1099 ftrace_bug(ret, ip);
1100 rec->flags |= FTRACE_FL_FAILED;
1107 * archs can override this function if they must do something
1108 * before the modifying code is performed.
1110 int __weak ftrace_arch_code_modify_prepare(void)
1116 * archs can override this function if they must do something
1117 * after the modifying code is performed.
1119 int __weak ftrace_arch_code_modify_post_process(void)
1124 static int __ftrace_modify_code(void *data)
1126 int *command = data;
1128 if (*command & FTRACE_ENABLE_CALLS)
1129 ftrace_replace_code(1);
1130 else if (*command & FTRACE_DISABLE_CALLS)
1131 ftrace_replace_code(0);
1133 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1134 ftrace_update_ftrace_func(ftrace_trace_function);
1136 if (*command & FTRACE_START_FUNC_RET)
1137 ftrace_enable_ftrace_graph_caller();
1138 else if (*command & FTRACE_STOP_FUNC_RET)
1139 ftrace_disable_ftrace_graph_caller();
1144 static void ftrace_run_update_code(int command)
1148 ret = ftrace_arch_code_modify_prepare();
1149 FTRACE_WARN_ON(ret);
1153 stop_machine(__ftrace_modify_code, &command, NULL);
1155 ret = ftrace_arch_code_modify_post_process();
1156 FTRACE_WARN_ON(ret);
1159 static ftrace_func_t saved_ftrace_func;
1160 static int ftrace_start_up;
1162 static void ftrace_startup_enable(int command)
1164 if (saved_ftrace_func != ftrace_trace_function) {
1165 saved_ftrace_func = ftrace_trace_function;
1166 command |= FTRACE_UPDATE_TRACE_FUNC;
1169 if (!command || !ftrace_enabled)
1172 ftrace_run_update_code(command);
1175 static void ftrace_startup(int command)
1177 if (unlikely(ftrace_disabled))
1181 command |= FTRACE_ENABLE_CALLS;
1183 ftrace_startup_enable(command);
1186 static void ftrace_shutdown(int command)
1188 if (unlikely(ftrace_disabled))
1193 * Just warn in case of unbalance; no need to kill ftrace, it's not
1194 * critical, but the ftrace_call callers may never be nopped again after
1195 * further ftrace uses.
1197 WARN_ON_ONCE(ftrace_start_up < 0);
1199 if (!ftrace_start_up)
1200 command |= FTRACE_DISABLE_CALLS;
1202 if (saved_ftrace_func != ftrace_trace_function) {
1203 saved_ftrace_func = ftrace_trace_function;
1204 command |= FTRACE_UPDATE_TRACE_FUNC;
1207 if (!command || !ftrace_enabled)
1210 ftrace_run_update_code(command);
1213 static void ftrace_startup_sysctl(void)
1215 int command = FTRACE_ENABLE_MCOUNT;
1217 if (unlikely(ftrace_disabled))
1220 /* Force update next time */
1221 saved_ftrace_func = NULL;
1222 /* ftrace_start_up is true if we want ftrace running */
1223 if (ftrace_start_up)
1224 command |= FTRACE_ENABLE_CALLS;
1226 ftrace_run_update_code(command);
1229 static void ftrace_shutdown_sysctl(void)
1231 int command = FTRACE_DISABLE_MCOUNT;
1233 if (unlikely(ftrace_disabled))
1236 /* ftrace_start_up is true if ftrace is running */
1237 if (ftrace_start_up)
1238 command |= FTRACE_DISABLE_CALLS;
1240 ftrace_run_update_code(command);
1243 static cycle_t ftrace_update_time;
1244 static unsigned long ftrace_update_cnt;
1245 unsigned long ftrace_update_tot_cnt;
1247 static int ftrace_update_code(struct module *mod)
1249 struct dyn_ftrace *p;
1250 cycle_t start, stop;
1252 start = ftrace_now(raw_smp_processor_id());
1253 ftrace_update_cnt = 0;
1255 while (ftrace_new_addrs) {
1257 /* If something went wrong, bail without enabling anything */
1258 if (unlikely(ftrace_disabled))
1261 p = ftrace_new_addrs;
1262 ftrace_new_addrs = p->newlist;
1265 /* convert record (i.e., patch the mcount call with a NOP) */
1266 if (ftrace_code_disable(mod, p)) {
1267 p->flags |= FTRACE_FL_CONVERTED;
1268 ftrace_update_cnt++;
1273 stop = ftrace_now(raw_smp_processor_id());
1274 ftrace_update_time = stop - start;
1275 ftrace_update_tot_cnt += ftrace_update_cnt;
1280 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1282 struct ftrace_page *pg;
1286 /* allocate a few pages */
1287 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1288 if (!ftrace_pages_start)
1292 * Allocate a few more pages.
1294 * TODO: have some parser search vmlinux before
1295 * final linking to find all calls to ftrace.
1297 * a) know how many pages to allocate.
1299 * b) set up the table then.
1301 * The dynamic code is still necessary for
1302 * modules.
1305 pg = ftrace_pages = ftrace_pages_start;
1307 cnt = num_to_init / ENTRIES_PER_PAGE;
1308 pr_info("ftrace: allocating %ld entries in %d pages\n",
1309 num_to_init, cnt + 1);
1311 for (i = 0; i < cnt; i++) {
1312 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1314 /* If we fail, we'll try later anyway */
1325 FTRACE_ITER_FILTER = (1 << 0),
1326 FTRACE_ITER_NOTRACE = (1 << 1),
1327 FTRACE_ITER_FAILURES = (1 << 2),
1328 FTRACE_ITER_PRINTALL = (1 << 3),
1329 FTRACE_ITER_HASH = (1 << 4),
1332 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1334 struct ftrace_iterator {
1335 struct ftrace_page *pg;
1339 struct trace_parser parser;
1343 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1345 struct ftrace_iterator *iter = m->private;
1346 struct hlist_node *hnd = v;
1347 struct hlist_head *hhd;
1349 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1354 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1357 hhd = &ftrace_func_hash[iter->hidx];
1359 if (hlist_empty(hhd)) {
1378 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1380 struct ftrace_iterator *iter = m->private;
1384 if (!(iter->flags & FTRACE_ITER_HASH))
1387 iter->flags |= FTRACE_ITER_HASH;
1390 for (l = 0; l <= *pos; ) {
1391 p = t_hash_next(m, p, &l);
1398 static int t_hash_show(struct seq_file *m, void *v)
1400 struct ftrace_func_probe *rec;
1401 struct hlist_node *hnd = v;
1403 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1405 if (rec->ops->print)
1406 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1408 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1411 seq_printf(m, ":%p", rec->data);
1418 t_next(struct seq_file *m, void *v, loff_t *pos)
1420 struct ftrace_iterator *iter = m->private;
1421 struct dyn_ftrace *rec = NULL;
1423 if (iter->flags & FTRACE_ITER_HASH)
1424 return t_hash_next(m, v, pos);
1428 if (iter->flags & FTRACE_ITER_PRINTALL)
1432 if (iter->idx >= iter->pg->index) {
1433 if (iter->pg->next) {
1434 iter->pg = iter->pg->next;
1439 rec = &iter->pg->records[iter->idx++];
1440 if ((rec->flags & FTRACE_FL_FREE) ||
1442 (!(iter->flags & FTRACE_ITER_FAILURES) &&
1443 (rec->flags & FTRACE_FL_FAILED)) ||
1445 ((iter->flags & FTRACE_ITER_FAILURES) &&
1446 !(rec->flags & FTRACE_FL_FAILED)) ||
1448 ((iter->flags & FTRACE_ITER_FILTER) &&
1449 !(rec->flags & FTRACE_FL_FILTER)) ||
1451 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1452 !(rec->flags & FTRACE_FL_NOTRACE))) {
1461 static void *t_start(struct seq_file *m, loff_t *pos)
1463 struct ftrace_iterator *iter = m->private;
1467 mutex_lock(&ftrace_lock);
1469 * For set_ftrace_filter reading, if we have the filter
1470 * off, we can short cut and just print out that all
1471 * functions are enabled.
1473 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1475 return t_hash_start(m, pos);
1476 iter->flags |= FTRACE_ITER_PRINTALL;
1480 if (iter->flags & FTRACE_ITER_HASH)
1481 return t_hash_start(m, pos);
1483 iter->pg = ftrace_pages_start;
1485 for (l = 0; l <= *pos; ) {
1486 p = t_next(m, p, &l);
1491 if (!p && iter->flags & FTRACE_ITER_FILTER)
1492 return t_hash_start(m, pos);
1497 static void t_stop(struct seq_file *m, void *p)
1499 mutex_unlock(&ftrace_lock);
1502 static int t_show(struct seq_file *m, void *v)
1504 struct ftrace_iterator *iter = m->private;
1505 struct dyn_ftrace *rec = v;
1507 if (iter->flags & FTRACE_ITER_HASH)
1508 return t_hash_show(m, v);
1510 if (iter->flags & FTRACE_ITER_PRINTALL) {
1511 seq_printf(m, "#### all functions enabled ####\n");
1518 seq_printf(m, "%ps\n", (void *)rec->ip);
1523 static struct seq_operations show_ftrace_seq_ops = {
1531 ftrace_avail_open(struct inode *inode, struct file *file)
1533 struct ftrace_iterator *iter;
1536 if (unlikely(ftrace_disabled))
1539 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1543 iter->pg = ftrace_pages_start;
1545 ret = seq_open(file, &show_ftrace_seq_ops);
1547 struct seq_file *m = file->private_data;
1558 ftrace_failures_open(struct inode *inode, struct file *file)
1562 struct ftrace_iterator *iter;
1564 ret = ftrace_avail_open(inode, file);
1566 m = (struct seq_file *)file->private_data;
1567 iter = (struct ftrace_iterator *)m->private;
1568 iter->flags = FTRACE_ITER_FAILURES;
1575 static void ftrace_filter_reset(int enable)
1577 struct ftrace_page *pg;
1578 struct dyn_ftrace *rec;
1579 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1581 mutex_lock(&ftrace_lock);
1583 ftrace_filtered = 0;
1584 do_for_each_ftrace_rec(pg, rec) {
1585 if (rec->flags & FTRACE_FL_FAILED)
1587 rec->flags &= ~type;
1588 } while_for_each_ftrace_rec();
1589 mutex_unlock(&ftrace_lock);
1593 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1595 struct ftrace_iterator *iter;
1598 if (unlikely(ftrace_disabled))
1601 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1605 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1610 mutex_lock(&ftrace_regex_lock);
1611 if ((file->f_mode & FMODE_WRITE) &&
1612 (file->f_flags & O_TRUNC))
1613 ftrace_filter_reset(enable);
1615 if (file->f_mode & FMODE_READ) {
1616 iter->pg = ftrace_pages_start;
1617 iter->flags = enable ? FTRACE_ITER_FILTER :
1618 FTRACE_ITER_NOTRACE;
1620 ret = seq_open(file, &show_ftrace_seq_ops);
1622 struct seq_file *m = file->private_data;
1627 file->private_data = iter;
1628 mutex_unlock(&ftrace_regex_lock);
1634 ftrace_filter_open(struct inode *inode, struct file *file)
1636 return ftrace_regex_open(inode, file, 1);
1640 ftrace_notrace_open(struct inode *inode, struct file *file)
1642 return ftrace_regex_open(inode, file, 0);
1646 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1650 if (file->f_mode & FMODE_READ)
1651 ret = seq_lseek(file, offset, origin);
1653 file->f_pos = ret = 1;
1658 static int ftrace_match(char *str, char *regex, int len, int type)
1665 if (strcmp(str, regex) == 0)
1668 case MATCH_FRONT_ONLY:
1669 if (strncmp(str, regex, len) == 0)
1672 case MATCH_MIDDLE_ONLY:
1673 if (strstr(str, regex))
1676 case MATCH_END_ONLY:
1677 ptr = strstr(str, regex);
1678 if (ptr && (ptr[len] == 0))
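/*
 * The match types map onto the glob forms a user can write, e.g. into
 * set_ftrace_filter (sketch):
 *
 *	sys_open	MATCH_FULL
 *	sched_*		MATCH_FRONT_ONLY
 *	*switch*	MATCH_MIDDLE_ONLY
 *	*_lock		MATCH_END_ONLY
 */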
1687 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1689 char str[KSYM_SYMBOL_LEN];
1691 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1692 return ftrace_match(str, regex, len, type);
1695 static void ftrace_match_records(char *buff, int len, int enable)
1697 unsigned int search_len;
1698 struct ftrace_page *pg;
1699 struct dyn_ftrace *rec;
1705 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1706 type = filter_parse_regex(buff, len, &search, ¬);
1708 search_len = strlen(search);
1710 mutex_lock(&ftrace_lock);
1711 do_for_each_ftrace_rec(pg, rec) {
1713 if (rec->flags & FTRACE_FL_FAILED)
1716 if (ftrace_match_record(rec, search, search_len, type)) {
1718 rec->flags &= ~flag;
1723 * Only enable filtering if we have a function that
1724 * is filtered on.
1726 if (enable && (rec->flags & FTRACE_FL_FILTER))
1727 ftrace_filtered = 1;
1728 } while_for_each_ftrace_rec();
1729 mutex_unlock(&ftrace_lock);
1733 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1734 char *regex, int len, int type)
1736 char str[KSYM_SYMBOL_LEN];
1739 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1741 if (!modname || strcmp(modname, mod))
1744 /* blank search means to match all funcs in the mod */
1746 return ftrace_match(str, regex, len, type);
1751 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1753 unsigned search_len = 0;
1754 struct ftrace_page *pg;
1755 struct dyn_ftrace *rec;
1756 int type = MATCH_FULL;
1757 char *search = buff;
1761 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1763 /* blank or '*' mean the same */
1764 if (strcmp(buff, "*") == 0)
1767 /* handle the case of 'don't filter this module' */
1768 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1774 type = filter_parse_regex(buff, strlen(buff), &search, ¬);
1775 search_len = strlen(search);
1778 mutex_lock(&ftrace_lock);
1779 do_for_each_ftrace_rec(pg, rec) {
1781 if (rec->flags & FTRACE_FL_FAILED)
1784 if (ftrace_match_module_record(rec, mod,
1785 search, search_len, type)) {
1787 rec->flags &= ~flag;
1791 if (enable && (rec->flags & FTRACE_FL_FILTER))
1792 ftrace_filtered = 1;
1794 } while_for_each_ftrace_rec();
1795 mutex_unlock(&ftrace_lock);
1799 * We register the module command as a template to show others how
1800 * to register a command as well.
1804 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1809 * cmd == 'mod' because we only registered this func
1810 * for the 'mod' ftrace_func_command.
1811 * But if you register one func with multiple commands,
1812 * you can tell which command was used by the cmd
1813 * parameter.
1816 /* we must have a module name */
1820 mod = strsep(¶m, ":");
1824 ftrace_match_module_records(func, mod, enable);
1828 static struct ftrace_func_command ftrace_mod_cmd = {
1830 .func = ftrace_mod_callback,
1833 static int __init ftrace_mod_cmd_init(void)
1835 return register_ftrace_command(&ftrace_mod_cmd);
1837 device_initcall(ftrace_mod_cmd_init);
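/*
 * Usage sketch for the command registered above: filter on functions
 * of a single module, e.g.
 *
 *	echo 'write*:mod:ext3' > set_ftrace_filter
 */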
1840 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1842 struct ftrace_func_probe *entry;
1843 struct hlist_head *hhd;
1844 struct hlist_node *n;
1848 key = hash_long(ip, FTRACE_HASH_BITS);
1850 hhd = &ftrace_func_hash[key];
1852 if (hlist_empty(hhd))
1856 * Disable preemption for these calls to prevent an RCU grace
1857 * period. This syncs the hash iteration with the freeing of items
1858 * on the hash. rcu_read_lock is too dangerous here.
1860 resched = ftrace_preempt_disable();
1861 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1862 if (entry->ip == ip)
1863 entry->ops->func(ip, parent_ip, &entry->data);
1865 ftrace_preempt_enable(resched);
1868 static struct ftrace_ops trace_probe_ops __read_mostly =
1870 .func = function_trace_probe_call,
1873 static int ftrace_probe_registered;
1875 static void __enable_ftrace_function_probe(void)
1879 if (ftrace_probe_registered)
1882 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1883 struct hlist_head *hhd = &ftrace_func_hash[i];
1887 /* Nothing registered? */
1888 if (i == FTRACE_FUNC_HASHSIZE)
1891 __register_ftrace_function(&trace_probe_ops);
1893 ftrace_probe_registered = 1;
1896 static void __disable_ftrace_function_probe(void)
1900 if (!ftrace_probe_registered)
1903 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1904 struct hlist_head *hhd = &ftrace_func_hash[i];
1909 /* no more funcs left */
1910 __unregister_ftrace_function(&trace_probe_ops);
1912 ftrace_probe_registered = 0;
1916 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1918 struct ftrace_func_probe *entry =
1919 container_of(rhp, struct ftrace_func_probe, rcu);
1921 if (entry->ops->free)
1922 entry->ops->free(&entry->data);
1928 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1931 struct ftrace_func_probe *entry;
1932 struct ftrace_page *pg;
1933 struct dyn_ftrace *rec;
1939 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
1940 len = strlen(search);
1942 /* we do not support '!' for function probes */
1946 mutex_lock(&ftrace_lock);
1947 do_for_each_ftrace_rec(pg, rec) {
1949 if (rec->flags & FTRACE_FL_FAILED)
1952 if (!ftrace_match_record(rec, search, len, type))
1955 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1957 /* If we did not process any, then return error */
1968 * The caller might want to do something special
1969 * for each function we find. We call the callback
1970 * to give the caller an opportunity to do so.
1972 if (ops->callback) {
1973 if (ops->callback(rec->ip, &entry->data) < 0) {
1974 /* caller does not like this func */
1981 entry->ip = rec->ip;
1983 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1984 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1986 } while_for_each_ftrace_rec();
1987 __enable_ftrace_function_probe();
1990 mutex_unlock(&ftrace_lock);
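/*
 * A minimal caller sketch (hypothetical names; the traceon/traceoff
 * probes in trace_functions.c are real users):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */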
1996 PROBE_TEST_FUNC = 1,
2001 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2002 void *data, int flags)
2004 struct ftrace_func_probe *entry;
2005 struct hlist_node *n, *tmp;
2006 char str[KSYM_SYMBOL_LEN];
2007 int type = MATCH_FULL;
2011 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2016 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
2017 len = strlen(search);
2019 /* we do not support '!' for function probes */
2024 mutex_lock(&ftrace_lock);
2025 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2026 struct hlist_head *hhd = &ftrace_func_hash[i];
2028 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2030 /* break up if statements for readability */
2031 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2034 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2037 /* do this last, since it is the most expensive */
2039 kallsyms_lookup(entry->ip, NULL, NULL,
2041 if (!ftrace_match(str, glob, len, type))
2045 hlist_del(&entry->node);
2046 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2049 __disable_ftrace_function_probe();
2050 mutex_unlock(&ftrace_lock);
2054 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2057 __unregister_ftrace_function_probe(glob, ops, data,
2058 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2062 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2064 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2067 void unregister_ftrace_function_probe_all(char *glob)
2069 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2072 static LIST_HEAD(ftrace_commands);
2073 static DEFINE_MUTEX(ftrace_cmd_mutex);
2075 int register_ftrace_command(struct ftrace_func_command *cmd)
2077 struct ftrace_func_command *p;
2080 mutex_lock(&ftrace_cmd_mutex);
2081 list_for_each_entry(p, &ftrace_commands, list) {
2082 if (strcmp(cmd->name, p->name) == 0) {
2087 list_add(&cmd->list, &ftrace_commands);
2089 mutex_unlock(&ftrace_cmd_mutex);
2094 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2096 struct ftrace_func_command *p, *n;
2099 mutex_lock(&ftrace_cmd_mutex);
2100 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2101 if (strcmp(cmd->name, p->name) == 0) {
2103 list_del_init(&p->list);
2108 mutex_unlock(&ftrace_cmd_mutex);
2113 static int ftrace_process_regex(char *buff, int len, int enable)
2115 char *func, *command, *next = buff;
2116 struct ftrace_func_command *p;
2119 func = strsep(&next, ":");
2122 ftrace_match_records(func, len, enable);
2128 command = strsep(&next, ":");
2130 mutex_lock(&ftrace_cmd_mutex);
2131 list_for_each_entry(p, &ftrace_commands, list) {
2132 if (strcmp(p->name, command) == 0) {
2133 ret = p->func(func, command, next, enable);
2138 mutex_unlock(&ftrace_cmd_mutex);
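/*
 * Writes parsed here are either a plain glob ("sched_*") or a command
 * of the form <glob>:<command>[:<param>], e.g. "*:mod:ext3", which is
 * handled by ftrace_mod_callback() above.
 */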
2144 ftrace_regex_write(struct file *file, const char __user *ubuf,
2145 size_t cnt, loff_t *ppos, int enable)
2147 struct ftrace_iterator *iter;
2148 struct trace_parser *parser;
2151 if (!cnt) /* cnt is a size_t and can never be negative */
2154 mutex_lock(&ftrace_regex_lock);
2156 if (file->f_mode & FMODE_READ) {
2157 struct seq_file *m = file->private_data;
2160 iter = file->private_data;
2162 parser = &iter->parser;
2163 read = trace_get_user(parser, ubuf, cnt, ppos);
2165 if (trace_parser_loaded(parser) &&
2166 !trace_parser_cont(parser)) {
2167 ret = ftrace_process_regex(parser->buffer,
2168 parser->idx, enable);
2172 trace_parser_clear(parser);
2177 mutex_unlock(&ftrace_regex_lock);
2183 ftrace_filter_write(struct file *file, const char __user *ubuf,
2184 size_t cnt, loff_t *ppos)
2186 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2190 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2191 size_t cnt, loff_t *ppos)
2193 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2197 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2199 if (unlikely(ftrace_disabled))
2202 mutex_lock(&ftrace_regex_lock);
2204 ftrace_filter_reset(enable);
2206 ftrace_match_records(buf, len, enable);
2207 mutex_unlock(&ftrace_regex_lock);
2211 * ftrace_set_filter - set a function to filter on in ftrace
2212 * @buf - the string that holds the function filter text.
2213 * @len - the length of the string.
2214 * @reset - non zero to reset all filters before applying this filter.
2216 * Filters denote which functions should be enabled when tracing is enabled.
2217 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2219 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2221 ftrace_set_regex(buf, len, reset, 1);
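/*
 * In-kernel usage sketch: trace only the scheduler entry points,
 * dropping any previously set filter:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 */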
2225 * ftrace_set_notrace - set a function to not trace in ftrace
2226 * @buf - the string that holds the function notrace text.
2227 * @len - the length of the string.
2228 * @reset - non zero to reset all filters before applying this filter.
2230 * Notrace Filters denote which functions should not be enabled when tracing
2231 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2232 * for tracing.
2234 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2236 ftrace_set_regex(buf, len, reset, 0);
2240 * command line interface to allow users to set filters on boot up.
2242 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2243 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2244 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2246 static int __init set_ftrace_notrace(char *str)
2248 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2251 __setup("ftrace_notrace=", set_ftrace_notrace);
2253 static int __init set_ftrace_filter(char *str)
2255 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2258 __setup("ftrace_filter=", set_ftrace_filter);
2260 static void __init set_ftrace_early_filter(char *buf, int enable)
2265 func = strsep(&buf, ",");
2266 ftrace_set_regex(func, strlen(func), 0, enable);
2270 static void __init set_ftrace_early_filters(void)
2272 if (ftrace_filter_buf[0])
2273 set_ftrace_early_filter(ftrace_filter_buf, 1);
2274 if (ftrace_notrace_buf[0])
2275 set_ftrace_early_filter(ftrace_notrace_buf, 0);
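/*
 * Example kernel command line (sketch; entries are comma-separated and
 * may use the same globs as set_ftrace_filter):
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*spin*
 */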
2279 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2281 struct seq_file *m = (struct seq_file *)file->private_data;
2282 struct ftrace_iterator *iter;
2283 struct trace_parser *parser;
2285 mutex_lock(&ftrace_regex_lock);
2286 if (file->f_mode & FMODE_READ) {
2289 seq_release(inode, file);
2291 iter = file->private_data;
2293 parser = &iter->parser;
2294 if (trace_parser_loaded(parser)) {
2295 parser->buffer[parser->idx] = 0;
2296 ftrace_match_records(parser->buffer, parser->idx, enable);
2299 mutex_lock(&ftrace_lock);
2300 if (ftrace_start_up && ftrace_enabled)
2301 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2302 mutex_unlock(&ftrace_lock);
2304 trace_parser_put(parser);
2307 mutex_unlock(&ftrace_regex_lock);
2312 ftrace_filter_release(struct inode *inode, struct file *file)
2314 return ftrace_regex_release(inode, file, 1);
2318 ftrace_notrace_release(struct inode *inode, struct file *file)
2320 return ftrace_regex_release(inode, file, 0);
2323 static const struct file_operations ftrace_avail_fops = {
2324 .open = ftrace_avail_open,
2326 .llseek = seq_lseek,
2327 .release = seq_release_private,
2330 static const struct file_operations ftrace_failures_fops = {
2331 .open = ftrace_failures_open,
2333 .llseek = seq_lseek,
2334 .release = seq_release_private,
2337 static const struct file_operations ftrace_filter_fops = {
2338 .open = ftrace_filter_open,
2340 .write = ftrace_filter_write,
2341 .llseek = ftrace_regex_lseek,
2342 .release = ftrace_filter_release,
2345 static const struct file_operations ftrace_notrace_fops = {
2346 .open = ftrace_notrace_open,
2348 .write = ftrace_notrace_write,
2349 .llseek = ftrace_regex_lseek,
2350 .release = ftrace_notrace_release,
2353 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2355 static DEFINE_MUTEX(graph_lock);
2357 int ftrace_graph_count;
2358 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2361 __g_next(struct seq_file *m, loff_t *pos)
2363 unsigned long *array = m->private;
2365 if (*pos >= ftrace_graph_count)
2367 return &array[*pos];
2371 g_next(struct seq_file *m, void *v, loff_t *pos)
2374 return __g_next(m, pos);
2377 static void *g_start(struct seq_file *m, loff_t *pos)
2379 mutex_lock(&graph_lock);
2381 /* Nothing set; tell g_show to print that all functions are enabled */
2382 if (!ftrace_graph_count && !*pos)
2385 return __g_next(m, pos);
2388 static void g_stop(struct seq_file *m, void *p)
2390 mutex_unlock(&graph_lock);
2393 static int g_show(struct seq_file *m, void *v)
2395 unsigned long *ptr = v;
2400 if (ptr == (unsigned long *)1) {
2401 seq_printf(m, "#### all functions enabled ####\n");
2405 seq_printf(m, "%ps\n", (void *)*ptr);
2410 static struct seq_operations ftrace_graph_seq_ops = {
2418 ftrace_graph_open(struct inode *inode, struct file *file)
2422 if (unlikely(ftrace_disabled))
2425 mutex_lock(&graph_lock);
2426 if ((file->f_mode & FMODE_WRITE) &&
2427 (file->f_flags & O_TRUNC)) {
2428 ftrace_graph_count = 0;
2429 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2432 if (file->f_mode & FMODE_READ) {
2433 ret = seq_open(file, &ftrace_graph_seq_ops);
2435 struct seq_file *m = file->private_data;
2436 m->private = ftrace_graph_funcs;
2439 file->private_data = ftrace_graph_funcs;
2440 mutex_unlock(&graph_lock);
2446 ftrace_graph_release(struct inode *inode, struct file *file)
2448 if (file->f_mode & FMODE_READ)
2449 seq_release(inode, file);
2454 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2456 struct dyn_ftrace *rec;
2457 struct ftrace_page *pg;
2465 if (ftrace_disabled)
2469 type = filter_parse_regex(buffer, strlen(buffer), &search, ¬);
2473 search_len = strlen(search);
2475 mutex_lock(&ftrace_lock);
2476 do_for_each_ftrace_rec(pg, rec) {
2478 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2481 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2484 if (ftrace_match_record(rec, search, search_len, type)) {
2485 /* ensure it is not already in the array */
2487 for (i = 0; i < *idx; i++)
2488 if (array[i] == rec->ip) {
2493 array[(*idx)++] = rec->ip;
2497 } while_for_each_ftrace_rec();
2499 mutex_unlock(&ftrace_lock);
2501 return found ? 0 : -EINVAL;
2505 ftrace_graph_write(struct file *file, const char __user *ubuf,
2506 size_t cnt, loff_t *ppos)
2508 struct trace_parser parser;
2509 unsigned long *array;
2513 if (!cnt) /* cnt is a size_t and can never be negative */
2516 mutex_lock(&graph_lock);
2518 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2523 if (file->f_mode & FMODE_READ) {
2524 struct seq_file *m = file->private_data;
2527 array = file->private_data;
2529 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2534 read = trace_get_user(&parser, ubuf, cnt, ppos);
2536 if (trace_parser_loaded((&parser))) {
2537 parser.buffer[parser.idx] = 0;
2539 /* we allow only one expression at a time */
2540 ret = ftrace_set_func(array, &ftrace_graph_count,
2548 trace_parser_put(&parser);
2549 mutex_unlock(&graph_lock);
2554 static const struct file_operations ftrace_graph_fops = {
2555 .open = ftrace_graph_open,
2557 .write = ftrace_graph_write,
2558 .release = ftrace_graph_release,
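/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *
 * limits the function graph tracer to the listed functions; an empty
 * file means all functions are graphed.
 */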
2560 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2562 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2565 trace_create_file("available_filter_functions", 0444,
2566 d_tracer, NULL, &ftrace_avail_fops);
2568 trace_create_file("failures", 0444,
2569 d_tracer, NULL, &ftrace_failures_fops);
2571 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2572 NULL, &ftrace_filter_fops);
2574 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2575 NULL, &ftrace_notrace_fops);
2577 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2578 trace_create_file("set_graph_function", 0444, d_tracer,
2580 &ftrace_graph_fops);
2581 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2586 static int ftrace_convert_nops(struct module *mod,
2587 unsigned long *start,
2592 unsigned long flags;
2594 mutex_lock(&ftrace_lock);
2597 addr = ftrace_call_adjust(*p++);
2599 * Some architecture linkers will pad between
2600 * the different mcount_loc sections of different
2601 * object files to satisfy alignments.
2602 * Skip any NULL pointers.
2606 ftrace_record_ip(addr);
2609 /* disable interrupts to prevent kstop machine */
2610 local_irq_save(flags);
2611 ftrace_update_code(mod);
2612 local_irq_restore(flags);
2613 mutex_unlock(&ftrace_lock);
2618 #ifdef CONFIG_MODULES
2619 void ftrace_release(void *start, void *end)
2621 struct dyn_ftrace *rec;
2622 struct ftrace_page *pg;
2623 unsigned long s = (unsigned long)start;
2624 unsigned long e = (unsigned long)end;
2626 if (ftrace_disabled || !start || start == end)
2629 mutex_lock(&ftrace_lock);
2630 do_for_each_ftrace_rec(pg, rec) {
2631 if ((rec->ip >= s) && (rec->ip < e)) {
2633 * rec->ip is changed in ftrace_free_rec();
2634 * it should not be between s and e if the record was freed.
2636 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2637 ftrace_free_rec(rec);
2639 } while_for_each_ftrace_rec();
2640 mutex_unlock(&ftrace_lock);
2643 static void ftrace_init_module(struct module *mod,
2644 unsigned long *start, unsigned long *end)
2646 if (ftrace_disabled || start == end)
2648 ftrace_convert_nops(mod, start, end);
2651 static int ftrace_module_notify(struct notifier_block *self,
2652 unsigned long val, void *data)
2654 struct module *mod = data;
2657 case MODULE_STATE_COMING:
2658 ftrace_init_module(mod, mod->ftrace_callsites,
2659 mod->ftrace_callsites +
2660 mod->num_ftrace_callsites);
2662 case MODULE_STATE_GOING:
2663 ftrace_release(mod->ftrace_callsites,
2664 mod->ftrace_callsites +
2665 mod->num_ftrace_callsites);
2672 static int ftrace_module_notify(struct notifier_block *self,
2673 unsigned long val, void *data)
2677 #endif /* CONFIG_MODULES */
2679 struct notifier_block ftrace_module_nb = {
2680 .notifier_call = ftrace_module_notify,
2684 extern unsigned long __start_mcount_loc[];
2685 extern unsigned long __stop_mcount_loc[];
2687 void __init ftrace_init(void)
2689 unsigned long count, addr, flags;
2692 /* Keep the ftrace pointer to the stub */
2693 addr = (unsigned long)ftrace_stub;
2695 local_irq_save(flags);
2696 ftrace_dyn_arch_init(&addr);
2697 local_irq_restore(flags);
2699 /* ftrace_dyn_arch_init places the return code in addr */
2703 count = __stop_mcount_loc - __start_mcount_loc;
2705 ret = ftrace_dyn_table_alloc(count);
2709 last_ftrace_enabled = ftrace_enabled = 1;
2711 ret = ftrace_convert_nops(NULL,
2715 ret = register_module_notifier(&ftrace_module_nb);
2717 pr_warning("Failed to register trace ftrace module notifier\n");
2719 set_ftrace_early_filters();
2723 ftrace_disabled = 1;
2728 static int __init ftrace_nodyn_init(void)
2733 device_initcall(ftrace_nodyn_init);
2735 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2736 static inline void ftrace_startup_enable(int command) { }
2737 /* Keep as macros so we do not need to define the commands */
2738 # define ftrace_startup(command) do { } while (0)
2739 # define ftrace_shutdown(command) do { } while (0)
2740 # define ftrace_startup_sysctl() do { } while (0)
2741 # define ftrace_shutdown_sysctl() do { } while (0)
2742 #endif /* CONFIG_DYNAMIC_FTRACE */
2745 ftrace_pid_read(struct file *file, char __user *ubuf,
2746 size_t cnt, loff_t *ppos)
2751 if (ftrace_pid_trace == ftrace_swapper_pid)
2752 r = sprintf(buf, "swapper tasks\n");
2753 else if (ftrace_pid_trace)
2754 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2756 r = sprintf(buf, "no pid\n");
2758 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2761 static void clear_ftrace_swapper(void)
2763 struct task_struct *p;
2767 for_each_online_cpu(cpu) {
2769 clear_tsk_trace_trace(p);
2774 static void set_ftrace_swapper(void)
2776 struct task_struct *p;
2780 for_each_online_cpu(cpu) {
2782 set_tsk_trace_trace(p);
2787 static void clear_ftrace_pid(struct pid *pid)
2789 struct task_struct *p;
2792 do_each_pid_task(pid, PIDTYPE_PID, p) {
2793 clear_tsk_trace_trace(p);
2794 } while_each_pid_task(pid, PIDTYPE_PID, p);
2800 static void set_ftrace_pid(struct pid *pid)
2802 struct task_struct *p;
2805 do_each_pid_task(pid, PIDTYPE_PID, p) {
2806 set_tsk_trace_trace(p);
2807 } while_each_pid_task(pid, PIDTYPE_PID, p);
2811 static void clear_ftrace_pid_task(struct pid **pid)
2813 if (*pid == ftrace_swapper_pid)
2814 clear_ftrace_swapper();
2816 clear_ftrace_pid(*pid);
2821 static void set_ftrace_pid_task(struct pid *pid)
2823 if (pid == ftrace_swapper_pid)
2824 set_ftrace_swapper();
2826 set_ftrace_pid(pid);
2830 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2831 size_t cnt, loff_t *ppos)
2838 if (cnt >= sizeof(buf))
2841 if (copy_from_user(&buf, ubuf, cnt))
2846 ret = strict_strtol(buf, 10, &val);
2850 mutex_lock(&ftrace_lock);
2852 /* disable pid tracing */
2853 if (!ftrace_pid_trace)
2856 clear_ftrace_pid_task(&ftrace_pid_trace);
2859 /* swapper task is special */
2861 pid = ftrace_swapper_pid;
2862 if (pid == ftrace_pid_trace)
2865 pid = find_get_pid(val);
2867 if (pid == ftrace_pid_trace) {
2873 if (ftrace_pid_trace)
2874 clear_ftrace_pid_task(&ftrace_pid_trace);
2879 ftrace_pid_trace = pid;
2881 set_ftrace_pid_task(ftrace_pid_trace);
2884 /* update the function call */
2885 ftrace_update_pid_func();
2886 ftrace_startup_enable(0);
2889 mutex_unlock(&ftrace_lock);
2894 static const struct file_operations ftrace_pid_fops = {
2895 .read = ftrace_pid_read,
2896 .write = ftrace_pid_write,
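/*
 * Usage sketch: trace only one task; writing a negative value clears
 * the pid filter and 0 selects the per-cpu swapper tasks:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 */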
2899 static __init int ftrace_init_debugfs(void)
2901 struct dentry *d_tracer;
2903 d_tracer = tracing_init_dentry();
2907 ftrace_init_dyn_debugfs(d_tracer);
2909 trace_create_file("set_ftrace_pid", 0644, d_tracer,
2910 NULL, &ftrace_pid_fops);
2912 ftrace_profile_debugfs(d_tracer);
2916 fs_initcall(ftrace_init_debugfs);
2919 * ftrace_kill - kill ftrace
2921 * This function should be used by panic code. It stops ftrace
2922 * but in a not-so-nice way: it permanently disables ftrace and
2923 * clears the trace function without any synchronization.
2925 void ftrace_kill(void)
2927 ftrace_disabled = 1;
2929 clear_ftrace_function();
2933 * register_ftrace_function - register a function for profiling
2934 * @ops - ops structure that holds the function for profiling.
2936 * Register a function to be called by all functions in the
2937 * kernel.
2939 * Note: @ops->func and all the functions it calls must be labeled
2940 * with "notrace", otherwise it will go into a
2943 int register_ftrace_function(struct ftrace_ops *ops)
2947 if (unlikely(ftrace_disabled))
2950 mutex_lock(&ftrace_lock);
2952 ret = __register_ftrace_function(ops);
2955 mutex_unlock(&ftrace_lock);
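/*
 * Minimal usage sketch (hypothetical names):
 *
 *	static void notrace my_tracer(unsigned long ip,
 *				      unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_tracer,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */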
2960 * unregister_ftrace_function - unregister a function for profiling.
2961 * @ops - ops structure that holds the function to unregister
2963 * Unregister a function that was added to be called by ftrace profiling.
2965 int unregister_ftrace_function(struct ftrace_ops *ops)
2969 mutex_lock(&ftrace_lock);
2970 ret = __unregister_ftrace_function(ops);
2972 mutex_unlock(&ftrace_lock);
2978 ftrace_enable_sysctl(struct ctl_table *table, int write,
2979 struct file *file, void __user *buffer, size_t *lenp,
2984 if (unlikely(ftrace_disabled))
2987 mutex_lock(&ftrace_lock);
2989 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
2991 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
2994 last_ftrace_enabled = !!ftrace_enabled;
2996 if (ftrace_enabled) {
2998 ftrace_startup_sysctl();
3000 /* we are starting ftrace again */
3001 if (ftrace_list != &ftrace_list_end) {
3002 if (ftrace_list->next == &ftrace_list_end)
3003 ftrace_trace_function = ftrace_list->func;
3005 ftrace_trace_function = ftrace_list_func;
3009 /* stopping ftrace calls (just send to ftrace_stub) */
3010 ftrace_trace_function = ftrace_stub;
3012 ftrace_shutdown_sysctl();
3016 mutex_unlock(&ftrace_lock);
3020 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3022 static int ftrace_graph_active;
3023 static struct notifier_block ftrace_suspend_notifier;
3025 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3030 /* The callbacks that hook function entry and return */
3031 trace_func_graph_ret_t ftrace_graph_return =
3032 (trace_func_graph_ret_t)ftrace_stub;
3033 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3035 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3036 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3040 unsigned long flags;
3041 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3042 struct task_struct *g, *t;
3044 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3045 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3046 * sizeof(struct ftrace_ret_stack),
3048 if (!ret_stack_list[i]) {
3056 read_lock_irqsave(&tasklist_lock, flags);
3057 do_each_thread(g, t) {
3063 if (t->ret_stack == NULL) {
3064 atomic_set(&t->tracing_graph_pause, 0);
3065 atomic_set(&t->trace_overrun, 0);
3066 t->curr_ret_stack = -1;
3067 /* Make sure the tasks see the -1 first: */
3069 t->ret_stack = ret_stack_list[start++];
3071 } while_each_thread(g, t);
3074 read_unlock_irqrestore(&tasklist_lock, flags);
3076 for (i = start; i < end; i++)
3077 kfree(ret_stack_list[i]);
3082 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3083 struct task_struct *next)
3085 unsigned long long timestamp;
3089 * If the user wants to count the time a function was asleep,
3090 * do not update the time stamps.
3092 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3095 timestamp = trace_clock_local();
3097 prev->ftrace_timestamp = timestamp;
3099 /* only process tasks that we timestamped */
3100 if (!next->ftrace_timestamp)
3104 * Update all the counters in next to make up for the
3105 * time next was sleeping.
3107 timestamp -= next->ftrace_timestamp;
3109 for (index = next->curr_ret_stack; index >= 0; index--)
3110 next->ret_stack[index].calltime += timestamp;
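/*
 * E.g. if next slept for 2ms, every pending entry's calltime moves
 * forward by 2ms, so the sleep is not reported as function run time.
 */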
3113 /* Allocate a return stack for each task */
3114 static int start_graph_tracing(void)
3116 struct ftrace_ret_stack **ret_stack_list;
3119 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3120 sizeof(struct ftrace_ret_stack *),
3123 if (!ret_stack_list)
3126 /* The cpu_boot init_task->ret_stack will never be freed */
3127 for_each_online_cpu(cpu) {
3128 if (!idle_task(cpu)->ret_stack)
3129 ftrace_graph_init_task(idle_task(cpu));
3133 ret = alloc_retstack_tasklist(ret_stack_list);
3134 } while (ret == -EAGAIN);
3137 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3139 pr_info("ftrace_graph: Couldn't activate tracepoint"
3140 " probe to kernel_sched_switch\n");
3143 kfree(ret_stack_list);
3148 * Hibernation protection.
3149 * The state of the current task is too unstable during
3150 * suspend/restore to disk. We want to protect against that.
3153 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3157 case PM_HIBERNATION_PREPARE:
3158 pause_graph_tracing();
3161 case PM_POST_HIBERNATION:
3162 unpause_graph_tracing();
3168 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3169 trace_func_graph_ent_t entryfunc)
3173 mutex_lock(&ftrace_lock);
3175 /* we currently allow only one tracer registered at a time */
3176 if (ftrace_graph_active) {
3181 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3182 register_pm_notifier(&ftrace_suspend_notifier);
3184 ftrace_graph_active++;
3185 ret = start_graph_tracing();
3187 ftrace_graph_active--;
3191 ftrace_graph_return = retfunc;
3192 ftrace_graph_entry = entryfunc;
3194 ftrace_startup(FTRACE_START_FUNC_RET);
3197 mutex_unlock(&ftrace_lock);
3201 void unregister_ftrace_graph(void)
3203 mutex_lock(&ftrace_lock);
3205 if (unlikely(!ftrace_graph_active))
3208 ftrace_graph_active--;
3209 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3210 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3211 ftrace_graph_entry = ftrace_graph_entry_stub;
3212 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3213 unregister_pm_notifier(&ftrace_suspend_notifier);
3216 mutex_unlock(&ftrace_lock);
3219 /* Allocate a return stack for newly created task */
3220 void ftrace_graph_init_task(struct task_struct *t)
3222 /* Make sure we do not use the parent ret_stack */
3223 t->ret_stack = NULL;
3225 if (ftrace_graph_active) {
3226 struct ftrace_ret_stack *ret_stack;
3228 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3229 * sizeof(struct ftrace_ret_stack),
3233 t->curr_ret_stack = -1;
3234 atomic_set(&t->tracing_graph_pause, 0);
3235 atomic_set(&t->trace_overrun, 0);
3236 t->ftrace_timestamp = 0;
3237 /* make curr_ret_stack visible before we add the ret_stack */
3239 t->ret_stack = ret_stack;
3243 void ftrace_graph_exit_task(struct task_struct *t)
3245 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3247 t->ret_stack = NULL;
3248 /* NULL must become visible to IRQs before we free it: */
3254 void ftrace_graph_stop(void)