/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between the time this is
 * called and the time the last CPU stops calling into
 * the old trace function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
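
/*
 * Illustrative sketch (not used by this file): the publish order above
 * pairs with the dependency barriers in ftrace_list_func(). Writer and
 * reader sides, in outline:
 *
 *	writer:				reader:
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * Without the smp_wmb(), a CPU walking the list could observe the new
 * list head before observing a valid ops->next behind it.
 */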
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};
#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
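
/*
 * Rough sizing sketch (illustrative only; actual sizes depend on the
 * architecture and config): with 4096-byte pages, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE works out to (4096 - 16) / 32 = 127 records
 * per page.
 */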
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* no interrupt should call this */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
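
/*
 * Note on the free list above (illustrative): freed records are chained
 * through their own ip field rather than through a separate pointer, so
 * a free record doubles as its own list node:
 *
 *	ftrace_free_records -> rec A (ip = rec B) -> rec B (ip = NULL)
 *
 * The FTRACE_FL_FREE flag guards against handing out a record that was
 * never actually freed.
 */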
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(ftrace_caller))
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
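
/*
 * What ftrace_modify_code() swaps is arch-specific. As an illustration
 * (x86, assuming the usual 5-byte mcount call site): enabling rewrites
 * the NOP bytes into "call ftrace_caller", disabling rewrites the call
 * into NOPs, and the old bytes are compared first so a mismatch fails
 * safely instead of corrupting text. Other architectures supply their
 * own call/nop sequences via ftrace_call_replace()/ftrace_nop_replace().
 */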
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
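
/*
 * Illustrative use of the command flags above (as ftrace_startup()
 * does below when both the call sites and the trace function need
 * updating in a single pass):
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 *
 * Running under stop_machine() guarantees that no CPU is executing an
 * mcount call site while its instruction bytes are being rewritten.
 */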
void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}
static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}
static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}
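
/*
 * Pattern examples for ftrace_match() (illustrative):
 *
 *	"sys_open"	MATCH_FULL		the exact symbol name
 *	"sys_*"		MATCH_FRONT_ONLY	names starting with "sys_"
 *	"*_lock"	MATCH_END_ONLY		names ending in "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY	names containing "spin"
 */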
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
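
/*
 * Illustrative use (e.g. from a tracer's init path), with the wildcard
 * syntax implemented by ftrace_match() above; this would trace only
 * schedule() plus everything starting with "sys_":
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *	ftrace_set_filter("sys_*", strlen("sys_*"), 0);
 */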
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
#else /* CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing any
 * more modifications or updates. Use it when something has gone
 * wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
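
/*
 * Illustrative caller (hypothetical names, not part of this file).
 * The callback and everything it calls must be notrace, as the note
 * above warns, or every traced function would recurse back into it:
 *
 *	static atomic_t my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */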
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}