/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be some lag before every call site
 * stops calling into the tracer.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	mutex_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
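
/*
 * A worked example of the arithmetic above (illustrative only; the
 * real sizes depend on the architecture and config): on a 64-bit
 * build with 4 KiB pages, a 16-byte struct ftrace_page header and
 * a 32-byte struct dyn_ftrace would give
 * (4096 - 16) / 32 = 127 records per page.
 */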
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
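
/*
 * Usage sketch (hypothetical; not part of this file).  Because the
 * macro pair expands to two nested for loops, a 'break' only leaves
 * the inner loop, so early exit must use a goto:
 *
 *	static struct dyn_ftrace *find_rec(unsigned long ip)
 *	{
 *		struct ftrace_page *pg;
 *		struct dyn_ftrace *rec;
 *
 *		do_for_each_ftrace_rec(pg, rec) {
 *			if (rec->ip == ip)
 *				goto found;
 *		} while_for_each_ftrace_rec();
 *		rec = NULL;
 *	 found:
 *		return rec;
 *	}
 */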
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */
		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
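
/*
 * A restatement of the filtered-and-enable branch above as a table,
 * for reference (no new logic):
 *
 *	FILTER	ENABLED	action
 *	  1	  1	nothing (already being traced)
 *	  1	  0	enable the record
 *	  0	  1	disable the record
 *	  0	  0	nothing (record not wanted)
 */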
static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records and records that have
		 * failed.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED)
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else
				ftrace_bug(failed, rec->ip);
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

/* callers of ftrace_startup/ftrace_shutdown hold ftrace_lock */
static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

	mutex_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	mutex_unlock(&ftrace_lock);

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return NULL;
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}
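
/*
 * Example of how the two helpers above combine (hypothetical input):
 * for buff = "sched_", no wildcard is found, so the type stays
 * MATCH_FULL and only an exact symbol name matches.  For
 * buff = "sched*", ftrace_setup_glob() truncates the buffer at the
 * '*', sets *search to "sched" and returns MATCH_FRONT_ONLY;
 * ftrace_match("schedule", "sched", 5, MATCH_FRONT_ONLY) then
 * returns 1.  A leading '*' ("*lock") yields MATCH_END_ONLY, and a
 * '*' on both ends ("*lock*") yields MATCH_MIDDLE_ONLY.
 */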
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'don't filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}

device_initcall(ftrace_mod_cmd_init);
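
/*
 * A sketch of a hypothetical command built on the same interface
 * (the name "dump" and its callback are made up for illustration):
 *
 *	static int
 *	my_dump_callback(char *func, char *cmd, char *param, int enable)
 *	{
 *		pr_info("cmd %s: func %s param %s\n",
 *			cmd, func, param ? param : "(none)");
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_dump_cmd = {
 *		.name	= "dump",
 *		.func	= my_dump_callback,
 *	};
 *
 *	// from an __init function:
 *	register_ftrace_command(&my_dump_cmd);
 *
 * The mod command itself is used from user space like:
 *
 *	echo '*spin*:mod:mymodule' > set_ftrace_filter
 */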
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_hook {
	struct hlist_node	node;
	struct ftrace_hook_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

static void
function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_hook *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_hook_ops __read_mostly =
{
	.func = function_trace_hook_call,
};

static int ftrace_hook_registered;

static void __enable_ftrace_function_hook(void)
{
	int i;

	if (ftrace_hook_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_hook_ops);
	ftrace_startup(0);
	ftrace_hook_registered = 1;
}

static void __disable_ftrace_function_hook(void)
{
	int i;

	if (!ftrace_hook_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_hook_ops);
	ftrace_shutdown(0);
	ftrace_hook_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_hook *entry =
		container_of(rhp, struct ftrace_func_hook, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
			      void *data)
{
	struct ftrace_func_hook *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function hooks */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not hook to any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_hook();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
enum {
	HOOK_TEST_FUNC		= 1,
	HOOK_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_hook *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function hooks */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & HOOK_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_hook();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
				void *data)
{
	__unregister_ftrace_function_hook(glob, ops, data,
					  HOOK_TEST_FUNC | HOOK_TEST_DATA);
}

void
unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
{
	__unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
}

void unregister_ftrace_function_hook_all(char *glob)
{
	__unregister_ftrace_function_hook(glob, NULL, NULL, 0);
}
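
/*
 * Usage sketch for the hook interface above (the my_hook_* names
 * are hypothetical).  The func callback runs from inside the
 * function tracer, so keep it minimal; the per-entry *data slot
 * can carry whatever state the caller set up:
 *
 *	static void
 *	my_hook_func(unsigned long ip, unsigned long parent_ip,
 *		     void **data)
 *	{
 *	}
 *
 *	static struct ftrace_hook_ops my_hook_ops = {
 *		.func	= my_hook_func,
 *	};
 *
 *	register_ftrace_function_hook("schedule*", &my_hook_ops, NULL);
 *	...
 *	unregister_ftrace_function_hook_func("schedule*", &my_hook_ops);
 */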
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	size_t read = 0;
	ssize_t ret;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
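
/*
 * In-kernel usage sketch (a hypothetical call site): a tracer that
 * only wants scheduler functions traced could do
 *
 *	char buf[] = "sched_*";
 *
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * Note the buffer must be writable: ftrace_setup_glob() truncates it
 * in place at the first '*'.  The equivalent from user space is
 *
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */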
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int j;

	if (ftrace_disabled)
		return -ENODEV;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
		if (strcmp(str, buffer) == 0) {
			/* Return 1 if we add it to the array */
			found = 1;
			for (j = 0; j < idx; j++)
				if (array[j] == rec->ip) {
					found = 0;
					break;
				}
			if (found)
				array[idx] = rec->ip;
			goto out;
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
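
/*
 * User-space interface sketch: writing a pid limits function tracing
 * to that task, 0 selects the per-cpu idle (swapper) tasks, and a
 * negative value disables pid filtering again:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid
 */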
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled outright and the
 * registered function is cleared without any synchronization.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
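
/*
 * Minimal usage sketch (the my_* names are hypothetical).  The
 * callback must be notrace, or mcount would recurse into it; the
 * empty body is where per-call work would go:
 *
 *	static void notrace
 *	my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */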
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
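
/*
 * Usage sketch (hypothetical callbacks).  Both must be notrace; the
 * entry handler returns nonzero to let the current function be
 * traced, zero to skip it:
 *
 *	static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void notrace my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */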
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */