/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
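/*
 * Background sketch (illustrative; x86 shown as an example): building
 * with 'gcc -pg' makes the compiler emit a call to mcount() in every
 * function prologue, roughly:
 *
 *	foo:
 *		call mcount
 *		... function body ...
 *
 * That per-function call site is the hook everything below patches.
 */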
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before every CPU sees the
 * change and stops calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
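/*
 * Illustrative: callers compose a bitmask of the commands above.
 * ftrace_startup(), for instance, may hand FTRACE_ENABLE_CALLS |
 * FTRACE_UPDATE_TRACE_FUNC to ftrace_run_update_code(), and
 * __ftrace_modify_code() then acts on each bit in turn.
 */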
static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
} __attribute__((packed));
#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
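/*
 * Worked numbers (illustrative only; sizes are arch and config
 * dependent): with 4K pages, a 16-byte struct ftrace_page header and
 * a 32-byte struct dyn_ftrace on a 64-bit build, ENTRIES_PER_PAGE
 * comes to (4096 - 16) / 32 = 127 records, so NR_TO_INIT below
 * preallocates on the order of 80 pages.
 */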
/* estimate from running different kernels */
#define NR_TO_INIT		10000
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	/* freed records chain through ->ip: the old ip slot holds the
	 * pointer to the next free record */
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* todo, disable tracing altogether on this warning */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();
	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;
	ftrace_add_hash(node, key);
	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))
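/*
 * Illustrative note (x86 as the example; the details live in the arch
 * code): ftrace_call_replace(ip, addr) builds the 5-byte "call addr"
 * instruction for the site at ip, and ftrace_nop_replace() returns a
 * matching 5-byte nop, so each patch below swaps one mcount call site
 * between a live call and a nop.
 */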
static int notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}

	return failed;
}
static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
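/*
 * stop_machine_run() parks every online CPU in a known state while
 * __ftrace_modify_code() runs, so the text patching above never races
 * with another CPU executing the instructions being rewritten.
 */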
static ftrace_func_t saved_ftrace_func;
static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else
		kfree(iter);

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);

	return -EPERM;
}
static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
					buff[i] = 0;
					break;
				}
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
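/*
 * Illustrative examples of the glob handling above: "schedule" is
 * MATCH_FULL, "sched*" is MATCH_FRONT_ONLY, "*switch" is
 * MATCH_END_ONLY and "*sched*" is MATCH_MIDDLE_ONLY.
 */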
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
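/*
 * Example (illustrative): a caller that only wants scheduler related
 * functions traced could do
 *
 *	ftrace_set_filter("*sched*", strlen("*sched*"), 1);
 *
 * which resets any previous filter and then marks every function
 * whose name contains "sched".
 */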
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	ftraced_task = p;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
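/*
 * Usage sketch (illustrative; my_trace_func and my_trace_ops are
 * made-up names, not part of this file):
 *
 *	static notrace void
 *	my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... record ip/parent_ip; call only notrace code ...
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */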
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}