/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>
/* ftrace_enabled is referenced throughout; it gates all tracing */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

/* terminator of the callback list; its stub func ends the chain */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* pairs with the smp_wmb() in __register_ftrace_function() */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the last
 * CPU ceasing to call the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
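/*
 * A sketch of the publish ordering used above (writer here, lockless
 * reader in ftrace_list_func; purely illustrative):
 *
 *	CPU 0 (register)		CPU 1 (ftrace_list_func)
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * If CPU 1 sees the new ops pointer, the barrier pairing guarantees
 * it also sees a valid ops->next, so the walk always terminates at
 * ftrace_list_end.
 */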
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
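/*
 * Worked example (illustrative; exact numbers depend on the arch):
 * with 4K pages, a 16-byte ftrace_page header and a 32-byte
 * struct dyn_ftrace (hlist_node + ip + flags on 64-bit), one page
 * holds (4096 - 16) / 32 = 127 records.
 */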
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;
static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	/* if the current page is full, move on to the next one */
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();
	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;
	ftrace_add_hash(node, key);
	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
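/*
 * Note on the resched dance above: this is called from mcount, which
 * can fire inside the scheduler itself. need_resched() is sampled
 * before disabling preemption; if a reschedule was already pending,
 * re-enabling with the no_resched variant avoids calling back into
 * schedule() (and hence back into the tracer) from this context.
 */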
#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))
static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
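/*
 * The filtered cases above, as a compact truth table (FILTER and
 * ENABLED are rec->flags bits):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		nothing
 *	  1	   0		enable (patch in the call)
 *	  0	   0		nothing
 *	  0	   1		disable (patch back to nop)
 */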
static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
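/*
 * Why stop_machine_run() above: patching a live call site is only
 * safe when no other CPU can be executing the instruction being
 * rewritten. Running __ftrace_modify_code with every other CPU parked
 * is also why it takes no locks.
 */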
static ftrace_func_t saved_ftrace_func;
static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;
	} else
		kfree(iter);

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	/* decode at most one leading/trailing '*' into a match type */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}
	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
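/*
 * Pattern examples accepted by ftrace_match() (illustrative):
 *
 *	"schedule"	MATCH_FULL		exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_fork"	MATCH_END_ONLY		suffix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 */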
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* flush any partially written filter left in the buffer */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);

	return 0;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
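/*
 * Userspace usage sketch for the two files created above (assumes
 * debugfs mounted at /sys/kernel/debug; the mount point varies):
 *
 *	# list every function that can be traced
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *
 *	# trace only the scheduler functions
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */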
static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
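/*
 * Minimal illustrative user of this interface (not part of this file;
 * my_hits/my_trace_func/my_ops are hypothetical). Note the callback is
 * notrace, exactly as the comment above requires:
 */
#if 0
static unsigned long my_hits;

static notrace void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	my_hits++;	/* must not call anything traceable */
}

static struct ftrace_ops my_ops __read_mostly =
{
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* pair with unregister_ftrace_function(&my_ops) on teardown */
	return register_ftrace_function(&my_ops);
}
#endif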
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}