tracing: add function profiler
kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/sched.h>
33
34 #include <asm/ftrace.h>
35
36 #include "trace.h"
37 #include "trace_stat.h"
38
39 #define FTRACE_WARN_ON(cond)                    \
40         do {                                    \
41                 if (WARN_ON(cond))              \
42                         ftrace_kill();          \
43         } while (0)
44
45 #define FTRACE_WARN_ON_ONCE(cond)               \
46         do {                                    \
47                 if (WARN_ON_ONCE(cond))         \
48                         ftrace_kill();          \
49         } while (0)
50
51 /* hash bits for specific function selection */
52 #define FTRACE_HASH_BITS 7
53 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
54
55 /* ftrace_enabled is a method to turn ftrace on or off */
56 int ftrace_enabled __read_mostly;
57 static int last_ftrace_enabled;
58
59 /* Quick disabling of function tracer. */
60 int function_trace_stop;
61
62 /*
63  * ftrace_disabled is set when an anomaly is discovered.
64  * ftrace_disabled is much stronger than ftrace_enabled.
65  */
66 static int ftrace_disabled __read_mostly;
67
68 static DEFINE_MUTEX(ftrace_lock);
69
70 static struct ftrace_ops ftrace_list_end __read_mostly =
71 {
72         .func = ftrace_stub,
73 };
74
75 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
76 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
77 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
79
80 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
81 {
82         struct ftrace_ops *op = ftrace_list;
83
84         /* in case someone actually ports this to alpha! */
85         read_barrier_depends();
86
87         while (op != &ftrace_list_end) {
88                 /* silly alpha */
89                 read_barrier_depends();
90                 op->func(ip, parent_ip);
91                 op = op->next;
92         }
93 }
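/*
 * Note on ordering: the read_barrier_depends() calls above pair with the
 * smp_wmb() in __register_ftrace_function() below, so that a walker can
 * never see a newly published ops before its ->next pointer is valid.
 */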
94
95 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
96 {
97         if (!test_tsk_trace_trace(current))
98                 return;
99
100         ftrace_pid_function(ip, parent_ip);
101 }
102
103 static void set_ftrace_pid_function(ftrace_func_t func)
104 {
105         /* do not set ftrace_pid_function to itself! */
106         if (func != ftrace_pid_func)
107                 ftrace_pid_function = func;
108 }
109
110 /**
111  * clear_ftrace_function - reset the ftrace function
112  *
113  * This NULLs the ftrace function and in essence stops
114  * tracing.  There may be a short lag before all CPUs stop tracing.
115  */
116 void clear_ftrace_function(void)
117 {
118         ftrace_trace_function = ftrace_stub;
119         __ftrace_trace_function = ftrace_stub;
120         ftrace_pid_function = ftrace_stub;
121 }
122
123 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
124 /*
125  * For those archs that do not test function_trace_stop in their
126  * mcount call site, we need to do it from C.
127  */
128 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
129 {
130         if (function_trace_stop)
131                 return;
132
133         __ftrace_trace_function(ip, parent_ip);
134 }
135 #endif
136
137 static int __register_ftrace_function(struct ftrace_ops *ops)
138 {
139         ops->next = ftrace_list;
140         /*
141          * We are entering ops into the ftrace_list but another
142          * CPU might be walking that list. We need to make sure
143          * the ops->next pointer is valid before another CPU sees
144          * the ops pointer included into the ftrace_list.
145          */
146         smp_wmb();
147         ftrace_list = ops;
148
149         if (ftrace_enabled) {
150                 ftrace_func_t func;
151
152                 if (ops->next == &ftrace_list_end)
153                         func = ops->func;
154                 else
155                         func = ftrace_list_func;
156
157                 if (ftrace_pid_trace) {
158                         set_ftrace_pid_function(func);
159                         func = ftrace_pid_func;
160                 }
161
162                 /*
163                  * For one func, simply call it directly.
164                  * For more than one func, call the chain.
165                  */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167                 ftrace_trace_function = func;
168 #else
169                 __ftrace_trace_function = func;
170                 ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172         }
173
174         return 0;
175 }
176
177 static int __unregister_ftrace_function(struct ftrace_ops *ops)
178 {
179         struct ftrace_ops **p;
180
181         /*
182          * If we are removing the last function, then simply point
183          * to the ftrace_stub.
184          */
185         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
186                 ftrace_trace_function = ftrace_stub;
187                 ftrace_list = &ftrace_list_end;
188                 return 0;
189         }
190
191         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
192                 if (*p == ops)
193                         break;
194
195         if (*p != ops)
196                 return -1;
197
198         *p = (*p)->next;
199
200         if (ftrace_enabled) {
201                 /* If we only have one func left, then call that directly */
202                 if (ftrace_list->next == &ftrace_list_end) {
203                         ftrace_func_t func = ftrace_list->func;
204
205                         if (ftrace_pid_trace) {
206                                 set_ftrace_pid_function(func);
207                                 func = ftrace_pid_func;
208                         }
209 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
210                         ftrace_trace_function = func;
211 #else
212                         __ftrace_trace_function = func;
213 #endif
214                 }
215         }
216
217         return 0;
218 }
219
220 static void ftrace_update_pid_func(void)
221 {
222         ftrace_func_t func;
223
224         if (ftrace_trace_function == ftrace_stub)
225                 return;
226
227         func = ftrace_trace_function;
228
229         if (ftrace_pid_trace) {
230                 set_ftrace_pid_function(func);
231                 func = ftrace_pid_func;
232         } else {
233                 if (func == ftrace_pid_func)
234                         func = ftrace_pid_function;
235         }
236
237 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
238         ftrace_trace_function = func;
239 #else
240         __ftrace_trace_function = func;
241 #endif
242 }
243
244 /* set when tracing only a pid */
245 struct pid *ftrace_pid_trace;
246 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
247
248 #ifdef CONFIG_DYNAMIC_FTRACE
249
250 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
251 # error Dynamic ftrace depends on MCOUNT_RECORD
252 #endif
253
254 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
255
256 struct ftrace_func_probe {
257         struct hlist_node       node;
258         struct ftrace_probe_ops *ops;
259         unsigned long           flags;
260         unsigned long           ip;
261         void                    *data;
262         struct rcu_head         rcu;
263 };
264
265 enum {
266         FTRACE_ENABLE_CALLS             = (1 << 0),
267         FTRACE_DISABLE_CALLS            = (1 << 1),
268         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
269         FTRACE_ENABLE_MCOUNT            = (1 << 3),
270         FTRACE_DISABLE_MCOUNT           = (1 << 4),
271         FTRACE_START_FUNC_RET           = (1 << 5),
272         FTRACE_STOP_FUNC_RET            = (1 << 6),
273 };
274
275 static int ftrace_filtered;
276
277 static struct dyn_ftrace *ftrace_new_addrs;
278
279 static DEFINE_MUTEX(ftrace_regex_lock);
280
281 struct ftrace_page {
282         struct ftrace_page      *next;
283         int                     index;
284         struct dyn_ftrace       records[];
285 };
286
287 #define ENTRIES_PER_PAGE \
288   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
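/*
 * Rough illustration only: assuming 4 KB pages, a 16-byte struct
 * ftrace_page header and 48-byte struct dyn_ftrace records (the real
 * sizes depend on the architecture and config), this works out to about
 * (4096 - 16) / 48 = 85 records per page.
 */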
289
290 /* estimate from running different kernels */
291 #define NR_TO_INIT              10000
292
293 static struct ftrace_page       *ftrace_pages_start;
294 static struct ftrace_page       *ftrace_pages;
295
296 static struct dyn_ftrace *ftrace_free_records;
297
298 /*
299  * This is a double for loop. Do not use 'break' to break out of the loop,
300  * you must use a goto.
301  */
302 #define do_for_each_ftrace_rec(pg, rec)                                 \
303         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
304                 int _____i;                                             \
305                 for (_____i = 0; _____i < pg->index; _____i++) {        \
306                         rec = &pg->records[_____i];
307
308 #define while_for_each_ftrace_rec()             \
309                 }                               \
310         }
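/*
 * A minimal usage sketch (some_condition() and do_something() are only
 * illustrative).  Note that 'break' would exit just the inner loop,
 * hence the goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (some_condition(rec))
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return;
 * found:
 *	do_something(rec);
 */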
311
312 #ifdef CONFIG_FUNCTION_PROFILER
313 static struct hlist_head *ftrace_profile_hash;
314 static int ftrace_profile_bits;
315 static int ftrace_profile_enabled;
316 static DEFINE_MUTEX(ftrace_profile_lock);
317
318 static void *
319 function_stat_next(void *v, int idx)
320 {
321         struct dyn_ftrace *rec = v;
322         struct ftrace_page *pg;
323
324         pg = (struct ftrace_page *)((unsigned long)rec & PAGE_MASK);
325
326  again:
327         rec++;
328         if ((void *)rec >= (void *)&pg->records[pg->index]) {
329                 pg = pg->next;
330                 if (!pg)
331                         return NULL;
332                 rec = &pg->records[0];
333         }
334
335         if (rec->flags & FTRACE_FL_FREE ||
336             rec->flags & FTRACE_FL_FAILED ||
337             !(rec->flags & FTRACE_FL_CONVERTED) ||
338             /* ignore non hit functions */
339             !rec->counter)
340                 goto again;
341
342         return rec;
343 }
344
345 static void *function_stat_start(struct tracer_stat *trace)
346 {
347         return function_stat_next(&ftrace_pages_start->records[0], 0);
348 }
349
350 static int function_stat_cmp(void *p1, void *p2)
351 {
352         struct dyn_ftrace *a = p1;
353         struct dyn_ftrace *b = p2;
354
355         if (a->counter < b->counter)
356                 return -1;
357         if (a->counter > b->counter)
358                 return 1;
359         else
360                 return 0;
361 }
362
363 static int function_stat_headers(struct seq_file *m)
364 {
365         seq_printf(m, "  Function                               Hit\n"
366                       "  --------                               ---\n");
367         return 0;
368 }
369
370 static int function_stat_show(struct seq_file *m, void *v)
371 {
372         struct dyn_ftrace *rec = v;
373         char str[KSYM_SYMBOL_LEN];
374
375         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
376
377         seq_printf(m, "  %-30.30s  %10lu\n", str, rec->counter);
378         return 0;
379 }
380
381 static struct tracer_stat function_stats = {
382         .name = "functions",
383         .stat_start = function_stat_start,
384         .stat_next = function_stat_next,
385         .stat_cmp = function_stat_cmp,
386         .stat_headers = function_stat_headers,
387         .stat_show = function_stat_show
388 };
389
390 static void ftrace_profile_init(int nr_funcs)
391 {
392         unsigned long addr;
393         int order;
394         int size;
395
396         /*
397          * We are profiling all functions, so let's size the hash at
398          * roughly 1/4th of the number of functions that are in the
399          * core kernel.
400          */
401         order = (sizeof(struct hlist_head) * nr_funcs) / 4;
402         order = get_order(order);
403         size = 1 << (PAGE_SHIFT + order);
404
405         pr_info("Allocating %d KB for profiler hash\n", size >> 10);
406
407         addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
408         if (!addr) {
409                 pr_warning("Could not allocate function profiler hash\n");
410                 return;
411         }
412
413         ftrace_profile_hash = (void *)addr;
414
415         /*
416          * struct hlist_head should be a single pointer of 4 or 8 bytes.
417          * A simple bit manipulation could then be used, but if for
418          * some reason struct hlist_head is not a power of 2 in size,
419          * then we play it safe, and simply count. This function
420          * is done once at boot up, so it is not that critical in
421          * performance.
422          */
423
424         size--;
425         size /= sizeof(struct hlist_head);
426
427         for (; size; size >>= 1)
428                 ftrace_profile_bits++;
429
430         pr_info("Function profiler has %d hash buckets\n",
431                 1 << ftrace_profile_bits);
432
433         return;
434 }
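/*
 * Worked example with illustrative numbers: for nr_funcs = 20000 and
 * 8-byte hlist_heads the request is 20000 * 8 / 4 = 40000 bytes, which
 * get_order() rounds up to order 4 (64 KB).  Then size - 1 = 65535,
 * divided by 8 gives 8191, so the loop above counts 13 bits:
 * 1 << 13 = 8192 buckets, exactly filling the 64 KB allocation.
 */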
435
436 static ssize_t
437 ftrace_profile_read(struct file *filp, char __user *ubuf,
438                      size_t cnt, loff_t *ppos)
439 {
440         char buf[64];
441         int r;
442
443         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
444         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
445 }
446
447 static void ftrace_profile_reset(void)
448 {
449         struct dyn_ftrace *rec;
450         struct ftrace_page *pg;
451
452         do_for_each_ftrace_rec(pg, rec) {
453                 rec->counter = 0;
454         } while_for_each_ftrace_rec();
455 }
456
457 static struct dyn_ftrace *ftrace_find_profiled_func(unsigned long ip)
458 {
459         struct dyn_ftrace *rec;
460         struct hlist_head *hhd;
461         struct hlist_node *n;
462         unsigned long flags;
463         unsigned long key;
464
465         if (!ftrace_profile_hash)
466                 return NULL;
467
468         key = hash_long(ip, ftrace_profile_bits);
469         hhd = &ftrace_profile_hash[key];
470
471         if (hlist_empty(hhd))
472                 return NULL;
473
474         local_irq_save(flags);
475         hlist_for_each_entry_rcu(rec, n, hhd, node) {
476                 if (rec->ip == ip)
477                         goto out;
478         }
479         rec = NULL;
480  out:
481         local_irq_restore(flags);
482
483         return rec;
484 }
485
486 static void
487 function_profile_call(unsigned long ip, unsigned long parent_ip)
488 {
489         struct dyn_ftrace *rec;
490         unsigned long flags;
491
492         if (!ftrace_profile_enabled)
493                 return;
494
495         local_irq_save(flags);
496         rec = ftrace_find_profiled_func(ip);
497         if (!rec)
498                 goto out;
499
500         rec->counter++;
501  out:
502         local_irq_restore(flags);
503 }
504
505 static struct ftrace_ops ftrace_profile_ops __read_mostly =
506 {
507         .func = function_profile_call,
508 };
509
510 static ssize_t
511 ftrace_profile_write(struct file *filp, const char __user *ubuf,
512                      size_t cnt, loff_t *ppos)
513 {
514         unsigned long val;
515         char buf[64];
516         int ret;
517
518         if (!ftrace_profile_hash) {
519                 pr_info("Cannot enable hash due to earlier problems\n");
520                 return -ENODEV;
521         }
522
523         if (cnt >= sizeof(buf))
524                 return -EINVAL;
525
526         if (copy_from_user(&buf, ubuf, cnt))
527                 return -EFAULT;
528
529         buf[cnt] = 0;
530
531         ret = strict_strtoul(buf, 10, &val);
532         if (ret < 0)
533                 return ret;
534
535         val = !!val;
536
537         mutex_lock(&ftrace_profile_lock);
538         if (ftrace_profile_enabled ^ val) {
539                 if (val) {
540                         ftrace_profile_reset();
541                         register_ftrace_function(&ftrace_profile_ops);
542                         ftrace_profile_enabled = 1;
543                 } else {
544                         ftrace_profile_enabled = 0;
545                         unregister_ftrace_function(&ftrace_profile_ops);
546                 }
547         }
548         mutex_unlock(&ftrace_profile_lock);
549
550         filp->f_pos += cnt;
551
552         return cnt;
553 }
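/*
 * Usage sketch (assuming debugfs is mounted in the usual place): writing
 * 1 to tracing/function_profile_enabled registers ftrace_profile_ops and
 * starts counting hits; the per-function counts can then be read via the
 * "functions" stat file registered below (under tracing/trace_stat/ in
 * this era).  Writing 0 unregisters the ops again.
 */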
554
555 static const struct file_operations ftrace_profile_fops = {
556         .open           = tracing_open_generic,
557         .read           = ftrace_profile_read,
558         .write          = ftrace_profile_write,
559 };
560
561 static void ftrace_profile_debugfs(struct dentry *d_tracer)
562 {
563         struct dentry *entry;
564         int ret;
565
566         ret = register_stat_tracer(&function_stats);
567         if (ret) {
568                 pr_warning("Warning: could not register "
569                            "function stats\n");
570                 return;
571         }
572
573         entry = debugfs_create_file("function_profile_enabled", 0644,
574                                     d_tracer, NULL, &ftrace_profile_fops);
575         if (!entry)
576                 pr_warning("Could not create debugfs "
577                            "'function_profile_enabled' entry\n");
578 }
579
580 static void ftrace_add_profile(struct dyn_ftrace *rec)
581 {
582         unsigned long key;
583
584         if (!ftrace_profile_hash)
585                 return;
586
587         key = hash_long(rec->ip, ftrace_profile_bits);
588         hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
589 }
590
591 static void ftrace_profile_release(struct dyn_ftrace *rec)
592 {
593         mutex_lock(&ftrace_profile_lock);
594         hlist_del(&rec->node);
595         mutex_unlock(&ftrace_profile_lock);
596 }
597
598 #else /* CONFIG_FUNCTION_PROFILER */
599 static void ftrace_profile_init(int nr_funcs)
600 {
601 }
602 static void ftrace_add_profile(struct dyn_ftrace *rec)
603 {
604 }
605 static void ftrace_profile_debugfs(struct dentry *d_tracer)
606 {
607 }
608 static void ftrace_profile_release(struct dyn_ftrace *rec)
609 {
610 }
611 #endif /* CONFIG_FUNCTION_PROFILER */
612
613 #ifdef CONFIG_KPROBES
614
615 static int frozen_record_count;
616
617 static inline void freeze_record(struct dyn_ftrace *rec)
618 {
619         if (!(rec->flags & FTRACE_FL_FROZEN)) {
620                 rec->flags |= FTRACE_FL_FROZEN;
621                 frozen_record_count++;
622         }
623 }
624
625 static inline void unfreeze_record(struct dyn_ftrace *rec)
626 {
627         if (rec->flags & FTRACE_FL_FROZEN) {
628                 rec->flags &= ~FTRACE_FL_FROZEN;
629                 frozen_record_count--;
630         }
631 }
632
633 static inline int record_frozen(struct dyn_ftrace *rec)
634 {
635         return rec->flags & FTRACE_FL_FROZEN;
636 }
637 #else
638 # define freeze_record(rec)                     ({ 0; })
639 # define unfreeze_record(rec)                   ({ 0; })
640 # define record_frozen(rec)                     ({ 0; })
641 #endif /* CONFIG_KPROBES */
642
643 static void ftrace_free_rec(struct dyn_ftrace *rec)
644 {
645         rec->freelist = ftrace_free_records;
646         ftrace_free_records = rec;
647         rec->flags |= FTRACE_FL_FREE;
648 }
649
650 void ftrace_release(void *start, unsigned long size)
651 {
652         struct dyn_ftrace *rec;
653         struct ftrace_page *pg;
654         unsigned long s = (unsigned long)start;
655         unsigned long e = s + size;
656
657         if (ftrace_disabled || !start)
658                 return;
659
660         mutex_lock(&ftrace_lock);
661         do_for_each_ftrace_rec(pg, rec) {
662                 if ((rec->ip >= s) && (rec->ip < e) &&
663                     !(rec->flags & FTRACE_FL_FREE)) {
664                         ftrace_free_rec(rec);
665                         ftrace_profile_release(rec);
666                 }
667         } while_for_each_ftrace_rec();
668         mutex_unlock(&ftrace_lock);
669 }
670
671 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
672 {
673         struct dyn_ftrace *rec;
674
675         /* First check for freed records */
676         if (ftrace_free_records) {
677                 rec = ftrace_free_records;
678
679                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
680                         FTRACE_WARN_ON_ONCE(1);
681                         ftrace_free_records = NULL;
682                         return NULL;
683                 }
684
685                 ftrace_free_records = rec->freelist;
686                 memset(rec, 0, sizeof(*rec));
687                 return rec;
688         }
689
690         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
691                 if (!ftrace_pages->next) {
692                         /* allocate another page */
693                         ftrace_pages->next =
694                                 (void *)get_zeroed_page(GFP_KERNEL);
695                         if (!ftrace_pages->next)
696                                 return NULL;
697                 }
698                 ftrace_pages = ftrace_pages->next;
699         }
700
701         return &ftrace_pages->records[ftrace_pages->index++];
702 }
703
704 static struct dyn_ftrace *
705 ftrace_record_ip(unsigned long ip)
706 {
707         struct dyn_ftrace *rec;
708
709         if (ftrace_disabled)
710                 return NULL;
711
712         rec = ftrace_alloc_dyn_node(ip);
713         if (!rec)
714                 return NULL;
715
716         rec->ip = ip;
717         rec->newlist = ftrace_new_addrs;
718         ftrace_new_addrs = rec;
719
720         ftrace_add_profile(rec);
721
722         return rec;
723 }
724
725 static void print_ip_ins(const char *fmt, unsigned char *p)
726 {
727         int i;
728
729         printk(KERN_CONT "%s", fmt);
730
731         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
732                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
733 }
734
735 static void ftrace_bug(int failed, unsigned long ip)
736 {
737         switch (failed) {
738         case -EFAULT:
739                 FTRACE_WARN_ON_ONCE(1);
740                 pr_info("ftrace faulted on modifying ");
741                 print_ip_sym(ip);
742                 break;
743         case -EINVAL:
744                 FTRACE_WARN_ON_ONCE(1);
745                 pr_info("ftrace failed to modify ");
746                 print_ip_sym(ip);
747                 print_ip_ins(" actual: ", (unsigned char *)ip);
748                 printk(KERN_CONT "\n");
749                 break;
750         case -EPERM:
751                 FTRACE_WARN_ON_ONCE(1);
752                 pr_info("ftrace faulted on writing ");
753                 print_ip_sym(ip);
754                 break;
755         default:
756                 FTRACE_WARN_ON_ONCE(1);
757                 pr_info("ftrace faulted on unknown error ");
758                 print_ip_sym(ip);
759         }
760 }
761
762
763 static int
764 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
765 {
766         unsigned long ftrace_addr;
767         unsigned long ip, fl;
768
769         ftrace_addr = (unsigned long)FTRACE_ADDR;
770
771         ip = rec->ip;
772
773         /*
774          * If this record is not to be traced and
775          * it is not enabled then do nothing.
776          *
777          * If this record is not to be traced and
778          * it is enabled then disable it.
779          *
780          */
781         if (rec->flags & FTRACE_FL_NOTRACE) {
782                 if (rec->flags & FTRACE_FL_ENABLED)
783                         rec->flags &= ~FTRACE_FL_ENABLED;
784                 else
785                         return 0;
786
787         } else if (ftrace_filtered && enable) {
788                 /*
789                  * Filtering is on:
790                  */
791
792                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
793
794                 /* Record is filtered and enabled, do nothing */
795                 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
796                         return 0;
797
798                 /* Record is not filtered or enabled, do nothing */
799                 if (!fl)
800                         return 0;
801
802                 /* Record is not filtered but enabled, disable it */
803                 if (fl == FTRACE_FL_ENABLED)
804                         rec->flags &= ~FTRACE_FL_ENABLED;
805                 else
806                 /* Otherwise record is filtered but not enabled, enable it */
807                         rec->flags |= FTRACE_FL_ENABLED;
808         } else {
809                 /* Disable or not filtered */
810
811                 if (enable) {
812                         /* if record is enabled, do nothing */
813                         if (rec->flags & FTRACE_FL_ENABLED)
814                                 return 0;
815
816                         rec->flags |= FTRACE_FL_ENABLED;
817
818                 } else {
819
820                         /* if record is not enabled, do nothing */
821                         if (!(rec->flags & FTRACE_FL_ENABLED))
822                                 return 0;
823
824                         rec->flags &= ~FTRACE_FL_ENABLED;
825                 }
826         }
827
828         if (rec->flags & FTRACE_FL_ENABLED)
829                 return ftrace_make_call(rec, ftrace_addr);
830         else
831                 return ftrace_make_nop(NULL, rec, ftrace_addr);
832 }
833
834 static void ftrace_replace_code(int enable)
835 {
836         struct dyn_ftrace *rec;
837         struct ftrace_page *pg;
838         int failed;
839
840         do_for_each_ftrace_rec(pg, rec) {
841                 /*
842                  * Skip over free records, records that have
843                  * failed and not converted.
844                  */
845                 if (rec->flags & FTRACE_FL_FREE ||
846                     rec->flags & FTRACE_FL_FAILED ||
847                     !(rec->flags & FTRACE_FL_CONVERTED))
848                         continue;
849
850                 /* ignore updates to this record's mcount site */
851                 if (get_kprobe((void *)rec->ip)) {
852                         freeze_record(rec);
853                         continue;
854                 } else {
855                         unfreeze_record(rec);
856                 }
857
858                 failed = __ftrace_replace_code(rec, enable);
859                 if (failed) {
860                         rec->flags |= FTRACE_FL_FAILED;
861                         if ((system_state == SYSTEM_BOOTING) ||
862                             !core_kernel_text(rec->ip)) {
863                                 ftrace_free_rec(rec);
864                         } else {
865                                 ftrace_bug(failed, rec->ip);
866                                 /* Stop processing */
867                                 return;
868                         }
869                 }
870         } while_for_each_ftrace_rec();
871 }
872
873 static int
874 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
875 {
876         unsigned long ip;
877         int ret;
878
879         ip = rec->ip;
880
881         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
882         if (ret) {
883                 ftrace_bug(ret, ip);
884                 rec->flags |= FTRACE_FL_FAILED;
885                 return 0;
886         }
887         return 1;
888 }
889
890 /*
891  * archs can override this function if they must do something
892  * before the modifying code is performed.
893  */
894 int __weak ftrace_arch_code_modify_prepare(void)
895 {
896         return 0;
897 }
898
899 /*
900  * archs can override this function if they must do something
901  * after the modifying code is performed.
902  */
903 int __weak ftrace_arch_code_modify_post_process(void)
904 {
905         return 0;
906 }
907
908 static int __ftrace_modify_code(void *data)
909 {
910         int *command = data;
911
912         if (*command & FTRACE_ENABLE_CALLS)
913                 ftrace_replace_code(1);
914         else if (*command & FTRACE_DISABLE_CALLS)
915                 ftrace_replace_code(0);
916
917         if (*command & FTRACE_UPDATE_TRACE_FUNC)
918                 ftrace_update_ftrace_func(ftrace_trace_function);
919
920         if (*command & FTRACE_START_FUNC_RET)
921                 ftrace_enable_ftrace_graph_caller();
922         else if (*command & FTRACE_STOP_FUNC_RET)
923                 ftrace_disable_ftrace_graph_caller();
924
925         return 0;
926 }
927
928 static void ftrace_run_update_code(int command)
929 {
930         int ret;
931
932         ret = ftrace_arch_code_modify_prepare();
933         FTRACE_WARN_ON(ret);
934         if (ret)
935                 return;
936
937         stop_machine(__ftrace_modify_code, &command, NULL);
938
939         ret = ftrace_arch_code_modify_post_process();
940         FTRACE_WARN_ON(ret);
941 }
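/*
 * The update runs under stop_machine() so that all other CPUs are parked
 * while the mcount call sites are rewritten; no CPU can be executing the
 * instructions that are being patched.
 */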
942
943 static ftrace_func_t saved_ftrace_func;
944 static int ftrace_start_up;
945
946 static void ftrace_startup_enable(int command)
947 {
948         if (saved_ftrace_func != ftrace_trace_function) {
949                 saved_ftrace_func = ftrace_trace_function;
950                 command |= FTRACE_UPDATE_TRACE_FUNC;
951         }
952
953         if (!command || !ftrace_enabled)
954                 return;
955
956         ftrace_run_update_code(command);
957 }
958
959 static void ftrace_startup(int command)
960 {
961         if (unlikely(ftrace_disabled))
962                 return;
963
964         ftrace_start_up++;
965         command |= FTRACE_ENABLE_CALLS;
966
967         ftrace_startup_enable(command);
968 }
969
970 static void ftrace_shutdown(int command)
971 {
972         if (unlikely(ftrace_disabled))
973                 return;
974
975         ftrace_start_up--;
976         if (!ftrace_start_up)
977                 command |= FTRACE_DISABLE_CALLS;
978
979         if (saved_ftrace_func != ftrace_trace_function) {
980                 saved_ftrace_func = ftrace_trace_function;
981                 command |= FTRACE_UPDATE_TRACE_FUNC;
982         }
983
984         if (!command || !ftrace_enabled)
985                 return;
986
987         ftrace_run_update_code(command);
988 }
989
990 static void ftrace_startup_sysctl(void)
991 {
992         int command = FTRACE_ENABLE_MCOUNT;
993
994         if (unlikely(ftrace_disabled))
995                 return;
996
997         /* Force update next time */
998         saved_ftrace_func = NULL;
999         /* ftrace_start_up is true if we want ftrace running */
1000         if (ftrace_start_up)
1001                 command |= FTRACE_ENABLE_CALLS;
1002
1003         ftrace_run_update_code(command);
1004 }
1005
1006 static void ftrace_shutdown_sysctl(void)
1007 {
1008         int command = FTRACE_DISABLE_MCOUNT;
1009
1010         if (unlikely(ftrace_disabled))
1011                 return;
1012
1013         /* ftrace_start_up is true if ftrace is running */
1014         if (ftrace_start_up)
1015                 command |= FTRACE_DISABLE_CALLS;
1016
1017         ftrace_run_update_code(command);
1018 }
1019
1020 static cycle_t          ftrace_update_time;
1021 static unsigned long    ftrace_update_cnt;
1022 unsigned long           ftrace_update_tot_cnt;
1023
1024 static int ftrace_update_code(struct module *mod)
1025 {
1026         struct dyn_ftrace *p;
1027         cycle_t start, stop;
1028
1029         start = ftrace_now(raw_smp_processor_id());
1030         ftrace_update_cnt = 0;
1031
1032         while (ftrace_new_addrs) {
1033
1034                 /* If something went wrong, bail without enabling anything */
1035                 if (unlikely(ftrace_disabled))
1036                         return -1;
1037
1038                 p = ftrace_new_addrs;
1039                 ftrace_new_addrs = p->newlist;
1040                 p->flags = 0L;
1041
1042                 /* convert record (i.e., patch mcount call with NOP) */
1043                 if (ftrace_code_disable(mod, p)) {
1044                         p->flags |= FTRACE_FL_CONVERTED;
1045                         ftrace_update_cnt++;
1046                 } else
1047                         ftrace_free_rec(p);
1048         }
1049
1050         stop = ftrace_now(raw_smp_processor_id());
1051         ftrace_update_time = stop - start;
1052         ftrace_update_tot_cnt += ftrace_update_cnt;
1053
1054         return 0;
1055 }
1056
1057 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1058 {
1059         struct ftrace_page *pg;
1060         int cnt;
1061         int i;
1062
1063         /* allocate a few pages */
1064         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1065         if (!ftrace_pages_start)
1066                 return -1;
1067
1068         /*
1069          * Allocate a few more pages.
1070          *
1071          * TODO: have some parser search vmlinux before
1072          *   final linking to find all calls to ftrace.
1073          *   Then we can:
1074          *    a) know how many pages to allocate.
1075          *     and/or
1076          *    b) set up the table then.
1077          *
1078          *  The dynamic code is still necessary for
1079          *  modules.
1080          */
1081
1082         pg = ftrace_pages = ftrace_pages_start;
1083
1084         cnt = num_to_init / ENTRIES_PER_PAGE;
1085         pr_info("ftrace: allocating %ld entries in %d pages\n",
1086                 num_to_init, cnt + 1);
1087
1088         for (i = 0; i < cnt; i++) {
1089                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1090
1091                 /* If we fail, we'll try later anyway */
1092                 if (!pg->next)
1093                         break;
1094
1095                 pg = pg->next;
1096         }
1097
1098         return 0;
1099 }
1100
1101 enum {
1102         FTRACE_ITER_FILTER      = (1 << 0),
1103         FTRACE_ITER_CONT        = (1 << 1),
1104         FTRACE_ITER_NOTRACE     = (1 << 2),
1105         FTRACE_ITER_FAILURES    = (1 << 3),
1106         FTRACE_ITER_PRINTALL    = (1 << 4),
1107         FTRACE_ITER_HASH        = (1 << 5),
1108 };
1109
1110 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1111
1112 struct ftrace_iterator {
1113         struct ftrace_page      *pg;
1114         int                     hidx;
1115         int                     idx;
1116         unsigned                flags;
1117         unsigned char           buffer[FTRACE_BUFF_MAX+1];
1118         unsigned                buffer_idx;
1119         unsigned                filtered;
1120 };
1121
1122 static void *
1123 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1124 {
1125         struct ftrace_iterator *iter = m->private;
1126         struct hlist_node *hnd = v;
1127         struct hlist_head *hhd;
1128
1129         WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1130
1131         (*pos)++;
1132
1133  retry:
1134         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1135                 return NULL;
1136
1137         hhd = &ftrace_func_hash[iter->hidx];
1138
1139         if (hlist_empty(hhd)) {
1140                 iter->hidx++;
1141                 hnd = NULL;
1142                 goto retry;
1143         }
1144
1145         if (!hnd)
1146                 hnd = hhd->first;
1147         else {
1148                 hnd = hnd->next;
1149                 if (!hnd) {
1150                         iter->hidx++;
1151                         goto retry;
1152                 }
1153         }
1154
1155         return hnd;
1156 }
1157
1158 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1159 {
1160         struct ftrace_iterator *iter = m->private;
1161         void *p = NULL;
1162
1163         iter->flags |= FTRACE_ITER_HASH;
1164
1165         return t_hash_next(m, p, pos);
1166 }
1167
1168 static int t_hash_show(struct seq_file *m, void *v)
1169 {
1170         struct ftrace_func_probe *rec;
1171         struct hlist_node *hnd = v;
1172         char str[KSYM_SYMBOL_LEN];
1173
1174         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1175
1176         if (rec->ops->print)
1177                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1178
1179         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1180         seq_printf(m, "%s:", str);
1181
1182         kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1183         seq_printf(m, "%s", str);
1184
1185         if (rec->data)
1186                 seq_printf(m, ":%p", rec->data);
1187         seq_putc(m, '\n');
1188
1189         return 0;
1190 }
1191
1192 static void *
1193 t_next(struct seq_file *m, void *v, loff_t *pos)
1194 {
1195         struct ftrace_iterator *iter = m->private;
1196         struct dyn_ftrace *rec = NULL;
1197
1198         if (iter->flags & FTRACE_ITER_HASH)
1199                 return t_hash_next(m, v, pos);
1200
1201         (*pos)++;
1202
1203         if (iter->flags & FTRACE_ITER_PRINTALL)
1204                 return NULL;
1205
1206  retry:
1207         if (iter->idx >= iter->pg->index) {
1208                 if (iter->pg->next) {
1209                         iter->pg = iter->pg->next;
1210                         iter->idx = 0;
1211                         goto retry;
1212                 } else {
1213                         iter->idx = -1;
1214                 }
1215         } else {
1216                 rec = &iter->pg->records[iter->idx++];
1217                 if ((rec->flags & FTRACE_FL_FREE) ||
1218
1219                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
1220                      (rec->flags & FTRACE_FL_FAILED)) ||
1221
1222                     ((iter->flags & FTRACE_ITER_FAILURES) &&
1223                      !(rec->flags & FTRACE_FL_FAILED)) ||
1224
1225                     ((iter->flags & FTRACE_ITER_FILTER) &&
1226                      !(rec->flags & FTRACE_FL_FILTER)) ||
1227
1228                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1229                      !(rec->flags & FTRACE_FL_NOTRACE))) {
1230                         rec = NULL;
1231                         goto retry;
1232                 }
1233         }
1234
1235         return rec;
1236 }
1237
1238 static void *t_start(struct seq_file *m, loff_t *pos)
1239 {
1240         struct ftrace_iterator *iter = m->private;
1241         void *p = NULL;
1242
1243         mutex_lock(&ftrace_lock);
1244         /*
1245          * For set_ftrace_filter reading, if we have the filter
1246          * off, we can short cut and just print out that all
1247          * functions are enabled.
1248          */
1249         if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1250                 if (*pos > 0)
1251                         return t_hash_start(m, pos);
1252                 iter->flags |= FTRACE_ITER_PRINTALL;
1253                 (*pos)++;
1254                 return iter;
1255         }
1256
1257         if (iter->flags & FTRACE_ITER_HASH)
1258                 return t_hash_start(m, pos);
1259
1260         if (*pos > 0) {
1261                 if (iter->idx < 0)
1262                         return p;
1263                 (*pos)--;
1264                 iter->idx--;
1265         }
1266
1267         p = t_next(m, p, pos);
1268
1269         if (!p)
1270                 return t_hash_start(m, pos);
1271
1272         return p;
1273 }
1274
1275 static void t_stop(struct seq_file *m, void *p)
1276 {
1277         mutex_unlock(&ftrace_lock);
1278 }
1279
1280 static int t_show(struct seq_file *m, void *v)
1281 {
1282         struct ftrace_iterator *iter = m->private;
1283         struct dyn_ftrace *rec = v;
1284         char str[KSYM_SYMBOL_LEN];
1285
1286         if (iter->flags & FTRACE_ITER_HASH)
1287                 return t_hash_show(m, v);
1288
1289         if (iter->flags & FTRACE_ITER_PRINTALL) {
1290                 seq_printf(m, "#### all functions enabled ####\n");
1291                 return 0;
1292         }
1293
1294         if (!rec)
1295                 return 0;
1296
1297         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1298
1299         seq_printf(m, "%s\n", str);
1300
1301         return 0;
1302 }
1303
1304 static struct seq_operations show_ftrace_seq_ops = {
1305         .start = t_start,
1306         .next = t_next,
1307         .stop = t_stop,
1308         .show = t_show,
1309 };
1310
1311 static int
1312 ftrace_avail_open(struct inode *inode, struct file *file)
1313 {
1314         struct ftrace_iterator *iter;
1315         int ret;
1316
1317         if (unlikely(ftrace_disabled))
1318                 return -ENODEV;
1319
1320         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1321         if (!iter)
1322                 return -ENOMEM;
1323
1324         iter->pg = ftrace_pages_start;
1325
1326         ret = seq_open(file, &show_ftrace_seq_ops);
1327         if (!ret) {
1328                 struct seq_file *m = file->private_data;
1329
1330                 m->private = iter;
1331         } else {
1332                 kfree(iter);
1333         }
1334
1335         return ret;
1336 }
1337
1338 int ftrace_avail_release(struct inode *inode, struct file *file)
1339 {
1340         struct seq_file *m = (struct seq_file *)file->private_data;
1341         struct ftrace_iterator *iter = m->private;
1342
1343         seq_release(inode, file);
1344         kfree(iter);
1345
1346         return 0;
1347 }
1348
1349 static int
1350 ftrace_failures_open(struct inode *inode, struct file *file)
1351 {
1352         int ret;
1353         struct seq_file *m;
1354         struct ftrace_iterator *iter;
1355
1356         ret = ftrace_avail_open(inode, file);
1357         if (!ret) {
1358                 m = (struct seq_file *)file->private_data;
1359                 iter = (struct ftrace_iterator *)m->private;
1360                 iter->flags = FTRACE_ITER_FAILURES;
1361         }
1362
1363         return ret;
1364 }
1365
1366
1367 static void ftrace_filter_reset(int enable)
1368 {
1369         struct ftrace_page *pg;
1370         struct dyn_ftrace *rec;
1371         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1372
1373         mutex_lock(&ftrace_lock);
1374         if (enable)
1375                 ftrace_filtered = 0;
1376         do_for_each_ftrace_rec(pg, rec) {
1377                 if (rec->flags & FTRACE_FL_FAILED)
1378                         continue;
1379                 rec->flags &= ~type;
1380         } while_for_each_ftrace_rec();
1381         mutex_unlock(&ftrace_lock);
1382 }
1383
1384 static int
1385 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1386 {
1387         struct ftrace_iterator *iter;
1388         int ret = 0;
1389
1390         if (unlikely(ftrace_disabled))
1391                 return -ENODEV;
1392
1393         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1394         if (!iter)
1395                 return -ENOMEM;
1396
1397         mutex_lock(&ftrace_regex_lock);
1398         if ((file->f_mode & FMODE_WRITE) &&
1399             !(file->f_flags & O_APPEND))
1400                 ftrace_filter_reset(enable);
1401
1402         if (file->f_mode & FMODE_READ) {
1403                 iter->pg = ftrace_pages_start;
1404                 iter->flags = enable ? FTRACE_ITER_FILTER :
1405                         FTRACE_ITER_NOTRACE;
1406
1407                 ret = seq_open(file, &show_ftrace_seq_ops);
1408                 if (!ret) {
1409                         struct seq_file *m = file->private_data;
1410                         m->private = iter;
1411                 } else
1412                         kfree(iter);
1413         } else
1414                 file->private_data = iter;
1415         mutex_unlock(&ftrace_regex_lock);
1416
1417         return ret;
1418 }
1419
1420 static int
1421 ftrace_filter_open(struct inode *inode, struct file *file)
1422 {
1423         return ftrace_regex_open(inode, file, 1);
1424 }
1425
1426 static int
1427 ftrace_notrace_open(struct inode *inode, struct file *file)
1428 {
1429         return ftrace_regex_open(inode, file, 0);
1430 }
1431
1432 static loff_t
1433 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1434 {
1435         loff_t ret;
1436
1437         if (file->f_mode & FMODE_READ)
1438                 ret = seq_lseek(file, offset, origin);
1439         else
1440                 file->f_pos = ret = 1;
1441
1442         return ret;
1443 }
1444
1445 enum {
1446         MATCH_FULL,
1447         MATCH_FRONT_ONLY,
1448         MATCH_MIDDLE_ONLY,
1449         MATCH_END_ONLY,
1450 };
1451
1452 /*
1453  * (static function - no need for kernel doc)
1454  *
1455  * Pass in a buffer containing a glob and this function will
1456  * set search to point to the search part of the buffer and
1457  * return the type of search it is (see enum above).
1458  * This does modify buff.
1459  *
1460  * Returns enum type.
1461  *  search returns the pointer to use for comparison.
1462  *  not returns 1 if buff started with a '!'
1463  *     0 otherwise.
1464  */
1465 static int
1466 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1467 {
1468         int type = MATCH_FULL;
1469         int i;
1470
1471         if (buff[0] == '!') {
1472                 *not = 1;
1473                 buff++;
1474                 len--;
1475         } else
1476                 *not = 0;
1477
1478         *search = buff;
1479
1480         for (i = 0; i < len; i++) {
1481                 if (buff[i] == '*') {
1482                         if (!i) {
1483                                 *search = buff + 1;
1484                                 type = MATCH_END_ONLY;
1485                         } else {
1486                                 if (type == MATCH_END_ONLY)
1487                                         type = MATCH_MIDDLE_ONLY;
1488                                 else
1489                                         type = MATCH_FRONT_ONLY;
1490                                 buff[i] = 0;
1491                                 break;
1492                         }
1493                 }
1494         }
1495
1496         return type;
1497 }
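/*
 * Illustrative examples of how globs are classified:
 *   "foo"    -> MATCH_FULL,        search = "foo"
 *   "foo*"   -> MATCH_FRONT_ONLY,  search = "foo"
 *   "*foo"   -> MATCH_END_ONLY,    search = "foo"
 *   "*foo*"  -> MATCH_MIDDLE_ONLY, search = "foo"
 *   "!foo*"  -> MATCH_FRONT_ONLY,  search = "foo", *not = 1
 */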
1498
1499 static int ftrace_match(char *str, char *regex, int len, int type)
1500 {
1501         int matched = 0;
1502         char *ptr;
1503
1504         switch (type) {
1505         case MATCH_FULL:
1506                 if (strcmp(str, regex) == 0)
1507                         matched = 1;
1508                 break;
1509         case MATCH_FRONT_ONLY:
1510                 if (strncmp(str, regex, len) == 0)
1511                         matched = 1;
1512                 break;
1513         case MATCH_MIDDLE_ONLY:
1514                 if (strstr(str, regex))
1515                         matched = 1;
1516                 break;
1517         case MATCH_END_ONLY:
1518                 ptr = strstr(str, regex);
1519                 if (ptr && (ptr[len] == 0))
1520                         matched = 1;
1521                 break;
1522         }
1523
1524         return matched;
1525 }
1526
1527 static int
1528 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1529 {
1530         char str[KSYM_SYMBOL_LEN];
1531
1532         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1533         return ftrace_match(str, regex, len, type);
1534 }
1535
1536 static void ftrace_match_records(char *buff, int len, int enable)
1537 {
1538         unsigned int search_len;
1539         struct ftrace_page *pg;
1540         struct dyn_ftrace *rec;
1541         unsigned long flag;
1542         char *search;
1543         int type;
1544         int not;
1545
1546         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1547         type = ftrace_setup_glob(buff, len, &search, &not);
1548
1549         search_len = strlen(search);
1550
1551         mutex_lock(&ftrace_lock);
1552         do_for_each_ftrace_rec(pg, rec) {
1553
1554                 if (rec->flags & FTRACE_FL_FAILED)
1555                         continue;
1556
1557                 if (ftrace_match_record(rec, search, search_len, type)) {
1558                         if (not)
1559                                 rec->flags &= ~flag;
1560                         else
1561                                 rec->flags |= flag;
1562                 }
1563                 /*
1564                  * Only enable filtering if we have a function that
1565                  * is filtered on.
1566                  */
1567                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1568                         ftrace_filtered = 1;
1569         } while_for_each_ftrace_rec();
1570         mutex_unlock(&ftrace_lock);
1571 }
1572
1573 static int
1574 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1575                            char *regex, int len, int type)
1576 {
1577         char str[KSYM_SYMBOL_LEN];
1578         char *modname;
1579
1580         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1581
1582         if (!modname || strcmp(modname, mod))
1583                 return 0;
1584
1585         /* blank search means to match all funcs in the mod */
1586         if (len)
1587                 return ftrace_match(str, regex, len, type);
1588         else
1589                 return 1;
1590 }
1591
1592 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1593 {
1594         unsigned search_len = 0;
1595         struct ftrace_page *pg;
1596         struct dyn_ftrace *rec;
1597         int type = MATCH_FULL;
1598         char *search = buff;
1599         unsigned long flag;
1600         int not = 0;
1601
1602         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1603
1604         /* blank or '*' mean the same */
1605         if (strcmp(buff, "*") == 0)
1606                 buff[0] = 0;
1607
1608         /* handle the case of 'dont filter this module' */
1609         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1610                 buff[0] = 0;
1611                 not = 1;
1612         }
1613
1614         if (strlen(buff)) {
1615                 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1616                 search_len = strlen(search);
1617         }
1618
1619         mutex_lock(&ftrace_lock);
1620         do_for_each_ftrace_rec(pg, rec) {
1621
1622                 if (rec->flags & FTRACE_FL_FAILED)
1623                         continue;
1624
1625                 if (ftrace_match_module_record(rec, mod,
1626                                                search, search_len, type)) {
1627                         if (not)
1628                                 rec->flags &= ~flag;
1629                         else
1630                                 rec->flags |= flag;
1631                 }
1632                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1633                         ftrace_filtered = 1;
1634
1635         } while_for_each_ftrace_rec();
1636         mutex_unlock(&ftrace_lock);
1637 }
1638
1639 /*
1640  * We register the module command as a template to show others how
641  * to register a command as well.
1642  */
1643
1644 static int
1645 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1646 {
1647         char *mod;
1648
1649         /*
1650          * cmd == 'mod' because we only registered this func
1651          * for the 'mod' ftrace_func_command.
1652          * But if you register one func with multiple commands,
1653          * you can tell which command was used by the cmd
1654          * parameter.
1655          */
1656
1657         /* we must have a module name */
1658         if (!param)
1659                 return -EINVAL;
1660
1661         mod = strsep(&param, ":");
1662         if (!strlen(mod))
1663                 return -EINVAL;
1664
1665         ftrace_match_module_records(func, mod, enable);
1666         return 0;
1667 }
1668
1669 static struct ftrace_func_command ftrace_mod_cmd = {
1670         .name                   = "mod",
1671         .func                   = ftrace_mod_callback,
1672 };
1673
1674 static int __init ftrace_mod_cmd_init(void)
1675 {
1676         return register_ftrace_command(&ftrace_mod_cmd);
1677 }
1678 device_initcall(ftrace_mod_cmd_init);
1679
1680 static void
1681 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1682 {
1683         struct ftrace_func_probe *entry;
1684         struct hlist_head *hhd;
1685         struct hlist_node *n;
1686         unsigned long key;
1687         int resched;
1688
1689         key = hash_long(ip, FTRACE_HASH_BITS);
1690
1691         hhd = &ftrace_func_hash[key];
1692
1693         if (hlist_empty(hhd))
1694                 return;
1695
1696         /*
1697          * Disable preemption for these calls to prevent a RCU grace
1698          * period. This syncs the hash iteration and freeing of items
1699          * on the hash. rcu_read_lock is too dangerous here.
1700          */
1701         resched = ftrace_preempt_disable();
1702         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1703                 if (entry->ip == ip)
1704                         entry->ops->func(ip, parent_ip, &entry->data);
1705         }
1706         ftrace_preempt_enable(resched);
1707 }
1708
1709 static struct ftrace_ops trace_probe_ops __read_mostly =
1710 {
1711         .func = function_trace_probe_call,
1712 };
1713
1714 static int ftrace_probe_registered;
1715
1716 static void __enable_ftrace_function_probe(void)
1717 {
1718         int i;
1719
1720         if (ftrace_probe_registered)
1721                 return;
1722
1723         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1724                 struct hlist_head *hhd = &ftrace_func_hash[i];
1725                 if (hhd->first)
1726                         break;
1727         }
1728         /* Nothing registered? */
1729         if (i == FTRACE_FUNC_HASHSIZE)
1730                 return;
1731
1732         __register_ftrace_function(&trace_probe_ops);
1733         ftrace_startup(0);
1734         ftrace_probe_registered = 1;
1735 }
1736
1737 static void __disable_ftrace_function_probe(void)
1738 {
1739         int i;
1740
1741         if (!ftrace_probe_registered)
1742                 return;
1743
1744         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1745                 struct hlist_head *hhd = &ftrace_func_hash[i];
1746                 if (hhd->first)
1747                         return;
1748         }
1749
1750         /* no more funcs left */
1751         __unregister_ftrace_function(&trace_probe_ops);
1752         ftrace_shutdown(0);
1753         ftrace_probe_registered = 0;
1754 }
1755
1756
1757 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1758 {
1759         struct ftrace_func_probe *entry =
1760                 container_of(rhp, struct ftrace_func_probe, rcu);
1761
1762         if (entry->ops->free)
1763                 entry->ops->free(&entry->data);
1764         kfree(entry);
1765 }
1766
1767
1768 int
1769 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1770                               void *data)
1771 {
1772         struct ftrace_func_probe *entry;
1773         struct ftrace_page *pg;
1774         struct dyn_ftrace *rec;
1775         int type, len, not;
1776         unsigned long key;
1777         int count = 0;
1778         char *search;
1779
1780         type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1781         len = strlen(search);
1782
1783         /* we do not support '!' for function probes */
1784         if (WARN_ON(not))
1785                 return -EINVAL;
1786
1787         mutex_lock(&ftrace_lock);
1788         do_for_each_ftrace_rec(pg, rec) {
1789
1790                 if (rec->flags & FTRACE_FL_FAILED)
1791                         continue;
1792
1793                 if (!ftrace_match_record(rec, search, len, type))
1794                         continue;
1795
1796                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1797                 if (!entry) {
1798                         /* If we did not process any, then return error */
1799                         if (!count)
1800                                 count = -ENOMEM;
1801                         goto out_unlock;
1802                 }
1803
1804                 count++;
1805
1806                 entry->data = data;
1807
1808                 /*
1809                  * The caller might want to do something special
1810                  * for each function we find. We call the callback
1811                  * to give the caller an opportunity to do so.
1812                  */
1813                 if (ops->callback) {
1814                         if (ops->callback(rec->ip, &entry->data) < 0) {
1815                                 /* caller does not like this func */
1816                                 kfree(entry);
1817                                 continue;
1818                         }
1819                 }
1820
1821                 entry->ops = ops;
1822                 entry->ip = rec->ip;
1823
1824                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1825                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1826
1827         } while_for_each_ftrace_rec();
1828         __enable_ftrace_function_probe();
1829
1830  out_unlock:
1831         mutex_unlock(&ftrace_lock);
1832
1833         return count;
1834 }
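/*
 * Illustrative sketch (not part of this file) of a caller: the handlers
 * below follow the way this function and function_trace_probe_call() use
 * struct ftrace_probe_ops -- ->callback() once per matched record,
 * ->func() on every hit, ->free() from ftrace_free_entry_rcu(). The
 * my_probe_* names and the glob are placeholders only.
 *
 *	static int my_probe_init(unsigned long ip, void **data)
 *	{
 *		*data = kzalloc(sizeof(unsigned long), GFP_KERNEL);
 *		return *data ? 0 : -ENOMEM;
 *	}
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		unsigned long *count = *data;
 *
 *		(*count)++;
 *	}
 *
 *	static void my_probe_free(void **data)
 *	{
 *		kfree(*data);
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func		= my_probe_func,
 *		.callback	= my_probe_init,
 *		.free		= my_probe_free,
 *	};
 *
 *	static char my_glob[] = "schedule*";
 *
 *	register_ftrace_function_probe(my_glob, &my_probe_ops, NULL);
 */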
1835
1836 enum {
1837         PROBE_TEST_FUNC         = 1,
1838         PROBE_TEST_DATA         = 2
1839 };
1840
1841 static void
1842 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1843                                   void *data, int flags)
1844 {
1845         struct ftrace_func_probe *entry;
1846         struct hlist_node *n, *tmp;
1847         char str[KSYM_SYMBOL_LEN];
1848         int type = MATCH_FULL;
1849         int i, len = 0;
1850         char *search;
1851
1852         if (glob && (!strcmp(glob, "*") || !strlen(glob)))
1853                 glob = NULL;
1854         else {
1855                 int not;
1856
1857                 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1858                 len = strlen(search);
1859
1860                 /* we do not support '!' for function probes */
1861                 if (WARN_ON(not))
1862                         return;
1863         }
1864
1865         mutex_lock(&ftrace_lock);
1866         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1867                 struct hlist_head *hhd = &ftrace_func_hash[i];
1868
1869                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1870
1871                         /* break up if statements for readability */
1872                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
1873                                 continue;
1874
1875                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
1876                                 continue;
1877
1878                         /* do this last, since it is the most expensive */
1879                         if (glob) {
1880                                 kallsyms_lookup(entry->ip, NULL, NULL,
1881                                                 NULL, str);
1882                                 if (!ftrace_match(str, glob, len, type))
1883                                         continue;
1884                         }
1885
1886                         hlist_del(&entry->node);
1887                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1888                 }
1889         }
1890         __disable_ftrace_function_probe();
1891         mutex_unlock(&ftrace_lock);
1892 }
1893
1894 void
1895 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1896                                 void *data)
1897 {
1898         __unregister_ftrace_function_probe(glob, ops, data,
1899                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
1900 }
1901
1902 void
1903 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
1904 {
1905         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
1906 }
1907
1908 void unregister_ftrace_function_probe_all(char *glob)
1909 {
1910         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
1911 }
1912
1913 static LIST_HEAD(ftrace_commands);
1914 static DEFINE_MUTEX(ftrace_cmd_mutex);
1915
1916 int register_ftrace_command(struct ftrace_func_command *cmd)
1917 {
1918         struct ftrace_func_command *p;
1919         int ret = 0;
1920
1921         mutex_lock(&ftrace_cmd_mutex);
1922         list_for_each_entry(p, &ftrace_commands, list) {
1923                 if (strcmp(cmd->name, p->name) == 0) {
1924                         ret = -EBUSY;
1925                         goto out_unlock;
1926                 }
1927         }
1928         list_add(&cmd->list, &ftrace_commands);
1929  out_unlock:
1930         mutex_unlock(&ftrace_cmd_mutex);
1931
1932         return ret;
1933 }
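/*
 * Sketch of a command registration (placeholder names, not existing
 * kernel symbols). The handler signature follows the call
 * p->func(func, command, next, enable) made in ftrace_process_regex():
 *
 *	static int my_cmd_func(char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */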
1934
1935 int unregister_ftrace_command(struct ftrace_func_command *cmd)
1936 {
1937         struct ftrace_func_command *p, *n;
1938         int ret = -ENODEV;
1939
1940         mutex_lock(&ftrace_cmd_mutex);
1941         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1942                 if (strcmp(cmd->name, p->name) == 0) {
1943                         ret = 0;
1944                         list_del_init(&p->list);
1945                         goto out_unlock;
1946                 }
1947         }
1948  out_unlock:
1949         mutex_unlock(&ftrace_cmd_mutex);
1950
1951         return ret;
1952 }
1953
1954 static int ftrace_process_regex(char *buff, int len, int enable)
1955 {
1956         char *func, *command, *next = buff;
1957         struct ftrace_func_command *p;
1958         int ret = -EINVAL;
1959
1960         func = strsep(&next, ":");
1961
1962         if (!next) {
1963                 ftrace_match_records(func, len, enable);
1964                 return 0;
1965         }
1966
1967         /* command found */
1968
1969         command = strsep(&next, ":");
1970
1971         mutex_lock(&ftrace_cmd_mutex);
1972         list_for_each_entry(p, &ftrace_commands, list) {
1973                 if (strcmp(p->name, command) == 0) {
1974                         ret = p->func(func, command, next, enable);
1975                         goto out_unlock;
1976                 }
1977         }
1978  out_unlock:
1979         mutex_unlock(&ftrace_cmd_mutex);
1980
1981         return ret;
1982 }
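/*
 * For reference: a write to set_ftrace_filter (or set_ftrace_notrace) of
 * the form "function:command[:parameter]" ends up here; a plain
 * "function" (no ':') only updates the match list. Which commands exist
 * depends on what has been registered with register_ftrace_command();
 * "mycmd" in "schedule:mycmd:param" would be such a registered name, not
 * something provided by this file.
 */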
1983
1984 static ssize_t
1985 ftrace_regex_write(struct file *file, const char __user *ubuf,
1986                    size_t cnt, loff_t *ppos, int enable)
1987 {
1988         struct ftrace_iterator *iter;
1989         char ch;
1990         size_t read = 0;
1991         ssize_t ret;
1992
1993         if (!cnt)
1994                 return 0;
1995
1996         mutex_lock(&ftrace_regex_lock);
1997
1998         if (file->f_mode & FMODE_READ) {
1999                 struct seq_file *m = file->private_data;
2000                 iter = m->private;
2001         } else
2002                 iter = file->private_data;
2003
2004         if (!*ppos) {
2005                 iter->flags &= ~FTRACE_ITER_CONT;
2006                 iter->buffer_idx = 0;
2007         }
2008
2009         ret = get_user(ch, ubuf++);
2010         if (ret)
2011                 goto out;
2012         read++;
2013         cnt--;
2014
2015         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
2016                 /* skip white space */
2017                 while (cnt && isspace(ch)) {
2018                         ret = get_user(ch, ubuf++);
2019                         if (ret)
2020                                 goto out;
2021                         read++;
2022                         cnt--;
2023                 }
2024
2025                 if (isspace(ch)) {
2026                         file->f_pos += read;
2027                         ret = read;
2028                         goto out;
2029                 }
2030
2031                 iter->buffer_idx = 0;
2032         }
2033
2034         while (cnt && !isspace(ch)) {
2035                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2036                         iter->buffer[iter->buffer_idx++] = ch;
2037                 else {
2038                         ret = -EINVAL;
2039                         goto out;
2040                 }
2041                 ret = get_user(ch, ubuf++);
2042                 if (ret)
2043                         goto out;
2044                 read++;
2045                 cnt--;
2046         }
2047
2048         if (isspace(ch)) {
2049                 iter->filtered++;
2050                 iter->buffer[iter->buffer_idx] = 0;
2051                 ret = ftrace_process_regex(iter->buffer,
2052                                            iter->buffer_idx, enable);
2053                 if (ret)
2054                         goto out;
2055                 iter->buffer_idx = 0;
2056         } else
2057                 iter->flags |= FTRACE_ITER_CONT;
2058
2059
2060         file->f_pos += read;
2061
2062         ret = read;
2063  out:
2064         mutex_unlock(&ftrace_regex_lock);
2065
2066         return ret;
2067 }
2068
2069 static ssize_t
2070 ftrace_filter_write(struct file *file, const char __user *ubuf,
2071                     size_t cnt, loff_t *ppos)
2072 {
2073         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2074 }
2075
2076 static ssize_t
2077 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2078                      size_t cnt, loff_t *ppos)
2079 {
2080         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2081 }
2082
2083 static void
2084 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2085 {
2086         if (unlikely(ftrace_disabled))
2087                 return;
2088
2089         mutex_lock(&ftrace_regex_lock);
2090         if (reset)
2091                 ftrace_filter_reset(enable);
2092         if (buf)
2093                 ftrace_match_records(buf, len, enable);
2094         mutex_unlock(&ftrace_regex_lock);
2095 }
2096
2097 /**
2098  * ftrace_set_filter - set a function to filter on in ftrace
2099  * @buf - the string that holds the function filter text.
2100  * @len - the length of the string.
2101  * @reset - non-zero to reset all filters before applying this filter.
2102  *
2103  * Filters denote which functions should be enabled when tracing is enabled.
2104  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2105  */
2106 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2107 {
2108         ftrace_set_regex(buf, len, reset, 1);
2109 }
2110
2111 /**
2112  * ftrace_set_notrace - set a function to not trace in ftrace
2113  * @buf - the string that holds the function notrace text.
2114  * @len - the length of the string.
2115  * @reset - non-zero to reset all filters before applying this filter.
2116  *
2117  * Notrace Filters denote which functions should not be enabled when tracing
2118  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2119  * for tracing.
2120  */
2121 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2122 {
2123         ftrace_set_regex(buf, len, reset, 0);
2124 }
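/*
 * Illustrative in-kernel use of the two helpers above (the function names
 * are arbitrary examples; the buffers are writable because the glob setup
 * may modify them in place):
 *
 *	static unsigned char sched_only[] = "sched*";
 *	static unsigned char no_clock[] = "ktime_get";
 *
 *	ftrace_set_filter(sched_only, sizeof(sched_only) - 1, 1);
 *	ftrace_set_notrace(no_clock, sizeof(no_clock) - 1, 1);
 */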
2125
2126 static int
2127 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2128 {
2129         struct seq_file *m = (struct seq_file *)file->private_data;
2130         struct ftrace_iterator *iter;
2131
2132         mutex_lock(&ftrace_regex_lock);
2133         if (file->f_mode & FMODE_READ) {
2134                 iter = m->private;
2135
2136                 seq_release(inode, file);
2137         } else
2138                 iter = file->private_data;
2139
2140         if (iter->buffer_idx) {
2141                 iter->filtered++;
2142                 iter->buffer[iter->buffer_idx] = 0;
2143                 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
2144         }
2145
2146         mutex_lock(&ftrace_lock);
2147         if (ftrace_start_up && ftrace_enabled)
2148                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2149         mutex_unlock(&ftrace_lock);
2150
2151         kfree(iter);
2152         mutex_unlock(&ftrace_regex_lock);
2153         return 0;
2154 }
2155
2156 static int
2157 ftrace_filter_release(struct inode *inode, struct file *file)
2158 {
2159         return ftrace_regex_release(inode, file, 1);
2160 }
2161
2162 static int
2163 ftrace_notrace_release(struct inode *inode, struct file *file)
2164 {
2165         return ftrace_regex_release(inode, file, 0);
2166 }
2167
2168 static const struct file_operations ftrace_avail_fops = {
2169         .open = ftrace_avail_open,
2170         .read = seq_read,
2171         .llseek = seq_lseek,
2172         .release = ftrace_avail_release,
2173 };
2174
2175 static const struct file_operations ftrace_failures_fops = {
2176         .open = ftrace_failures_open,
2177         .read = seq_read,
2178         .llseek = seq_lseek,
2179         .release = ftrace_avail_release,
2180 };
2181
2182 static const struct file_operations ftrace_filter_fops = {
2183         .open = ftrace_filter_open,
2184         .read = seq_read,
2185         .write = ftrace_filter_write,
2186         .llseek = ftrace_regex_lseek,
2187         .release = ftrace_filter_release,
2188 };
2189
2190 static const struct file_operations ftrace_notrace_fops = {
2191         .open = ftrace_notrace_open,
2192         .read = seq_read,
2193         .write = ftrace_notrace_write,
2194         .llseek = ftrace_regex_lseek,
2195         .release = ftrace_notrace_release,
2196 };
2197
2198 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2199
2200 static DEFINE_MUTEX(graph_lock);
2201
2202 int ftrace_graph_count;
2203 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2204
2205 static void *
2206 g_next(struct seq_file *m, void *v, loff_t *pos)
2207 {
2208         unsigned long *array = m->private;
2209         int index = *pos;
2210
2211         (*pos)++;
2212
2213         if (index >= ftrace_graph_count)
2214                 return NULL;
2215
2216         return &array[index];
2217 }
2218
2219 static void *g_start(struct seq_file *m, loff_t *pos)
2220 {
2221         void *p = NULL;
2222
2223         mutex_lock(&graph_lock);
2224
2225         /* Nothing registered; tell g_show to print that all functions are enabled */
2226         if (!ftrace_graph_count && !*pos)
2227                 return (void *)1;
2228
2229         p = g_next(m, p, pos);
2230
2231         return p;
2232 }
2233
2234 static void g_stop(struct seq_file *m, void *p)
2235 {
2236         mutex_unlock(&graph_lock);
2237 }
2238
2239 static int g_show(struct seq_file *m, void *v)
2240 {
2241         unsigned long *ptr = v;
2242         char str[KSYM_SYMBOL_LEN];
2243
2244         if (!ptr)
2245                 return 0;
2246
2247         if (ptr == (unsigned long *)1) {
2248                 seq_printf(m, "#### all functions enabled ####\n");
2249                 return 0;
2250         }
2251
2252         kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
2253
2254         seq_printf(m, "%s\n", str);
2255
2256         return 0;
2257 }
2258
2259 static struct seq_operations ftrace_graph_seq_ops = {
2260         .start = g_start,
2261         .next = g_next,
2262         .stop = g_stop,
2263         .show = g_show,
2264 };
2265
2266 static int
2267 ftrace_graph_open(struct inode *inode, struct file *file)
2268 {
2269         int ret = 0;
2270
2271         if (unlikely(ftrace_disabled))
2272                 return -ENODEV;
2273
2274         mutex_lock(&graph_lock);
2275         if ((file->f_mode & FMODE_WRITE) &&
2276             !(file->f_flags & O_APPEND)) {
2277                 ftrace_graph_count = 0;
2278                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2279         }
2280
2281         if (file->f_mode & FMODE_READ) {
2282                 ret = seq_open(file, &ftrace_graph_seq_ops);
2283                 if (!ret) {
2284                         struct seq_file *m = file->private_data;
2285                         m->private = ftrace_graph_funcs;
2286                 }
2287         } else
2288                 file->private_data = ftrace_graph_funcs;
2289         mutex_unlock(&graph_lock);
2290
2291         return ret;
2292 }
2293
2294 static int
2295 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2296 {
2297         struct dyn_ftrace *rec;
2298         struct ftrace_page *pg;
2299         int search_len;
2300         int found = 0;
2301         int type, not;
2302         char *search;
2303         bool exists;
2304         int i;
2305
2306         if (ftrace_disabled)
2307                 return -ENODEV;
2308
2309         /* decode regex */
2310         type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2311         if (not)
2312                 return -EINVAL;
2313
2314         search_len = strlen(search);
2315
2316         mutex_lock(&ftrace_lock);
2317         do_for_each_ftrace_rec(pg, rec) {
2318
2319                 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2320                         break;
2321
2322                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2323                         continue;
2324
2325                 if (ftrace_match_record(rec, search, search_len, type)) {
2326                         /* ensure it is not already in the array */
2327                         exists = false;
2328                         for (i = 0; i < *idx; i++)
2329                                 if (array[i] == rec->ip) {
2330                                         exists = true;
2331                                         break;
2332                                 }
2333                         if (!exists) {
2334                                 array[(*idx)++] = rec->ip;
2335                                 found = 1;
2336                         }
2337                 }
2338         } while_for_each_ftrace_rec();
2339
2340         mutex_unlock(&ftrace_lock);
2341
2342         return found ? 0 : -EINVAL;
2343 }
2344
2345 static ssize_t
2346 ftrace_graph_write(struct file *file, const char __user *ubuf,
2347                    size_t cnt, loff_t *ppos)
2348 {
2349         unsigned char buffer[FTRACE_BUFF_MAX+1];
2350         unsigned long *array;
2351         size_t read = 0;
2352         ssize_t ret;
2353         int index = 0;
2354         char ch;
2355
2356         if (!cnt)
2357                 return 0;
2358
2359         mutex_lock(&graph_lock);
2360
2361         if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2362                 ret = -EBUSY;
2363                 goto out;
2364         }
2365
2366         if (file->f_mode & FMODE_READ) {
2367                 struct seq_file *m = file->private_data;
2368                 array = m->private;
2369         } else
2370                 array = file->private_data;
2371
2372         ret = get_user(ch, ubuf++);
2373         if (ret)
2374                 goto out;
2375         read++;
2376         cnt--;
2377
2378         /* skip white space */
2379         while (cnt && isspace(ch)) {
2380                 ret = get_user(ch, ubuf++);
2381                 if (ret)
2382                         goto out;
2383                 read++;
2384                 cnt--;
2385         }
2386
2387         if (isspace(ch)) {
2388                 *ppos += read;
2389                 ret = read;
2390                 goto out;
2391         }
2392
2393         while (cnt && !isspace(ch)) {
2394                 if (index < FTRACE_BUFF_MAX)
2395                         buffer[index++] = ch;
2396                 else {
2397                         ret = -EINVAL;
2398                         goto out;
2399                 }
2400                 ret = get_user(ch, ubuf++);
2401                 if (ret)
2402                         goto out;
2403                 read++;
2404                 cnt--;
2405         }
2406         buffer[index] = 0;
2407
2408         /* we allow only one expression at a time */
2409         ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2410         if (ret)
2411                 goto out;
2412
2413         file->f_pos += read;
2414
2415         ret = read;
2416  out:
2417         mutex_unlock(&graph_lock);
2418
2419         return ret;
2420 }
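/*
 * Notes on the interface: each write may contain a single expression, and
 * opening set_graph_function without O_APPEND clears the current list
 * (see ftrace_graph_open() above). So writing "schedule" replaces the
 * list with one entry, while a second write of "sys_*" with the file
 * opened O_APPEND adds to it.
 */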
2421
2422 static const struct file_operations ftrace_graph_fops = {
2423         .open = ftrace_graph_open,
2424         .read = seq_read,
2425         .write = ftrace_graph_write,
2426 };
2427 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2428
2429 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2430 {
2431         struct dentry *entry;
2432
2433         entry = debugfs_create_file("available_filter_functions", 0444,
2434                                     d_tracer, NULL, &ftrace_avail_fops);
2435         if (!entry)
2436                 pr_warning("Could not create debugfs "
2437                            "'available_filter_functions' entry\n");
2438
2439         entry = debugfs_create_file("failures", 0444,
2440                                     d_tracer, NULL, &ftrace_failures_fops);
2441         if (!entry)
2442                 pr_warning("Could not create debugfs 'failures' entry\n");
2443
2444         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2445                                     NULL, &ftrace_filter_fops);
2446         if (!entry)
2447                 pr_warning("Could not create debugfs "
2448                            "'set_ftrace_filter' entry\n");
2449
2450         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2451                                     NULL, &ftrace_notrace_fops);
2452         if (!entry)
2453                 pr_warning("Could not create debugfs "
2454                            "'set_ftrace_notrace' entry\n");
2455
2456 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2457         entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
2458                                     NULL,
2459                                     &ftrace_graph_fops);
2460         if (!entry)
2461                 pr_warning("Could not create debugfs "
2462                            "'set_graph_function' entry\n");
2463 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2464
2465         ftrace_profile_debugfs(d_tracer);
2466
2467         return 0;
2468 }
2469
2470 static int ftrace_convert_nops(struct module *mod,
2471                                unsigned long *start,
2472                                unsigned long *end)
2473 {
2474         unsigned long *p;
2475         unsigned long addr;
2476         unsigned long flags;
2477
2478         mutex_lock(&ftrace_lock);
2479         p = start;
2480         while (p < end) {
2481                 addr = ftrace_call_adjust(*p++);
2482                 /*
2483                  * Some architecture linkers will pad between
2484                  * the different mcount_loc sections of different
2485                  * object files to satisfy alignments.
2486                  * Skip any NULL pointers.
2487                  */
2488                 if (!addr)
2489                         continue;
2490                 ftrace_record_ip(addr);
2491         }
2492
2493         /* disable interrupts to prevent kstop machine */
2494         local_irq_save(flags);
2495         ftrace_update_code(mod);
2496         local_irq_restore(flags);
2497         mutex_unlock(&ftrace_lock);
2498
2499         return 0;
2500 }
2501
2502 void ftrace_init_module(struct module *mod,
2503                         unsigned long *start, unsigned long *end)
2504 {
2505         if (ftrace_disabled || start == end)
2506                 return;
2507         ftrace_convert_nops(mod, start, end);
2508 }
2509
2510 extern unsigned long __start_mcount_loc[];
2511 extern unsigned long __stop_mcount_loc[];
2512
2513 void __init ftrace_init(void)
2514 {
2515         unsigned long count, addr, flags;
2516         int ret;
2517
2518         /* Keep the ftrace pointer to the stub */
2519         addr = (unsigned long)ftrace_stub;
2520
2521         local_irq_save(flags);
2522         ftrace_dyn_arch_init(&addr);
2523         local_irq_restore(flags);
2524
2525         /* ftrace_dyn_arch_init places the return code in addr */
2526         if (addr)
2527                 goto failed;
2528
2529         count = __stop_mcount_loc - __start_mcount_loc;
2530
2531         ret = ftrace_dyn_table_alloc(count);
2532         if (ret)
2533                 goto failed;
2534
2535         ftrace_profile_init(count);
2536
2537         last_ftrace_enabled = ftrace_enabled = 1;
2538
2539         ret = ftrace_convert_nops(NULL,
2540                                   __start_mcount_loc,
2541                                   __stop_mcount_loc);
2542
2543         return;
2544  failed:
2545         ftrace_disabled = 1;
2546 }
2547
2548 #else
2549
2550 static int __init ftrace_nodyn_init(void)
2551 {
2552         ftrace_enabled = 1;
2553         return 0;
2554 }
2555 device_initcall(ftrace_nodyn_init);
2556
2557 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2558 static inline void ftrace_startup_enable(int command) { }
2559 /* Keep as macros so we do not need to define the commands */
2560 # define ftrace_startup(command)        do { } while (0)
2561 # define ftrace_shutdown(command)       do { } while (0)
2562 # define ftrace_startup_sysctl()        do { } while (0)
2563 # define ftrace_shutdown_sysctl()       do { } while (0)
2564 #endif /* CONFIG_DYNAMIC_FTRACE */
2565
2566 static ssize_t
2567 ftrace_pid_read(struct file *file, char __user *ubuf,
2568                        size_t cnt, loff_t *ppos)
2569 {
2570         char buf[64];
2571         int r;
2572
2573         if (ftrace_pid_trace == ftrace_swapper_pid)
2574                 r = sprintf(buf, "swapper tasks\n");
2575         else if (ftrace_pid_trace)
2576                 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2577         else
2578                 r = sprintf(buf, "no pid\n");
2579
2580         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2581 }
2582
2583 static void clear_ftrace_swapper(void)
2584 {
2585         struct task_struct *p;
2586         int cpu;
2587
2588         get_online_cpus();
2589         for_each_online_cpu(cpu) {
2590                 p = idle_task(cpu);
2591                 clear_tsk_trace_trace(p);
2592         }
2593         put_online_cpus();
2594 }
2595
2596 static void set_ftrace_swapper(void)
2597 {
2598         struct task_struct *p;
2599         int cpu;
2600
2601         get_online_cpus();
2602         for_each_online_cpu(cpu) {
2603                 p = idle_task(cpu);
2604                 set_tsk_trace_trace(p);
2605         }
2606         put_online_cpus();
2607 }
2608
2609 static void clear_ftrace_pid(struct pid *pid)
2610 {
2611         struct task_struct *p;
2612
2613         rcu_read_lock();
2614         do_each_pid_task(pid, PIDTYPE_PID, p) {
2615                 clear_tsk_trace_trace(p);
2616         } while_each_pid_task(pid, PIDTYPE_PID, p);
2617         rcu_read_unlock();
2618
2619         put_pid(pid);
2620 }
2621
2622 static void set_ftrace_pid(struct pid *pid)
2623 {
2624         struct task_struct *p;
2625
2626         rcu_read_lock();
2627         do_each_pid_task(pid, PIDTYPE_PID, p) {
2628                 set_tsk_trace_trace(p);
2629         } while_each_pid_task(pid, PIDTYPE_PID, p);
2630         rcu_read_unlock();
2631 }
2632
2633 static void clear_ftrace_pid_task(struct pid **pid)
2634 {
2635         if (*pid == ftrace_swapper_pid)
2636                 clear_ftrace_swapper();
2637         else
2638                 clear_ftrace_pid(*pid);
2639
2640         *pid = NULL;
2641 }
2642
2643 static void set_ftrace_pid_task(struct pid *pid)
2644 {
2645         if (pid == ftrace_swapper_pid)
2646                 set_ftrace_swapper();
2647         else
2648                 set_ftrace_pid(pid);
2649 }
2650
2651 static ssize_t
2652 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2653                    size_t cnt, loff_t *ppos)
2654 {
2655         struct pid *pid;
2656         char buf[64];
2657         long val;
2658         int ret;
2659
2660         if (cnt >= sizeof(buf))
2661                 return -EINVAL;
2662
2663         if (copy_from_user(&buf, ubuf, cnt))
2664                 return -EFAULT;
2665
2666         buf[cnt] = 0;
2667
2668         ret = strict_strtol(buf, 10, &val);
2669         if (ret < 0)
2670                 return ret;
2671
2672         mutex_lock(&ftrace_lock);
2673         if (val < 0) {
2674                 /* disable pid tracing */
2675                 if (!ftrace_pid_trace)
2676                         goto out;
2677
2678                 clear_ftrace_pid_task(&ftrace_pid_trace);
2679
2680         } else {
2681                 /* swapper task is special */
2682                 if (!val) {
2683                         pid = ftrace_swapper_pid;
2684                         if (pid == ftrace_pid_trace)
2685                                 goto out;
2686                 } else {
2687                         pid = find_get_pid(val);
2688
2689                         if (pid == ftrace_pid_trace) {
2690                                 put_pid(pid);
2691                                 goto out;
2692                         }
2693                 }
2694
2695                 if (ftrace_pid_trace)
2696                         clear_ftrace_pid_task(&ftrace_pid_trace);
2697
2698                 if (!pid)
2699                         goto out;
2700
2701                 ftrace_pid_trace = pid;
2702
2703                 set_ftrace_pid_task(ftrace_pid_trace);
2704         }
2705
2706         /* update the function call */
2707         ftrace_update_pid_func();
2708         ftrace_startup_enable(0);
2709
2710  out:
2711         mutex_unlock(&ftrace_lock);
2712
2713         return cnt;
2714 }
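/*
 * Summary of the values accepted above: a positive number restricts
 * function tracing to that PID, "0" selects the idle/swapper tasks, and
 * a negative number turns PID filtering back off.
 */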
2715
2716 static const struct file_operations ftrace_pid_fops = {
2717         .read = ftrace_pid_read,
2718         .write = ftrace_pid_write,
2719 };
2720
2721 static __init int ftrace_init_debugfs(void)
2722 {
2723         struct dentry *d_tracer;
2724         struct dentry *entry;
2725
2726         d_tracer = tracing_init_dentry();
2727         if (!d_tracer)
2728                 return 0;
2729
2730         ftrace_init_dyn_debugfs(d_tracer);
2731
2732         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2733                                     NULL, &ftrace_pid_fops);
2734         if (!entry)
2735                 pr_warning("Could not create debugfs "
2736                            "'set_ftrace_pid' entry\n");
2737         return 0;
2738 }
2739 fs_initcall(ftrace_init_debugfs);
2740
2741 /**
2742  * ftrace_kill - kill ftrace
2743  *
2744  * This function should be used by panic code. It stops ftrace
2745  * but in a not so nice way. Since it takes no locks, it is safe
2746  * to call even from atomic or interrupt context.
2747  */
2748 void ftrace_kill(void)
2749 {
2750         ftrace_disabled = 1;
2751         ftrace_enabled = 0;
2752         clear_ftrace_function();
2753 }
2754
2755 /**
2756  * register_ftrace_function - register a function for profiling
2757  * @ops - ops structure that holds the function for profiling.
2758  *
2759  * Register a function to be called by all functions in the
2760  * kernel.
2761  *
2762  * Note: @ops->func and all the functions it calls must be labeled
2763  *       with "notrace", otherwise it will go into a
2764  *       recursive loop.
2765  */
2766 int register_ftrace_function(struct ftrace_ops *ops)
2767 {
2768         int ret;
2769
2770         if (unlikely(ftrace_disabled))
2771                 return -1;
2772
2773         mutex_lock(&ftrace_lock);
2774
2775         ret = __register_ftrace_function(ops);
2776         ftrace_startup(0);
2777
2778         mutex_unlock(&ftrace_lock);
2779         return ret;
2780 }
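/*
 * Minimal sketch of a caller (placeholder names). As noted above, the
 * callback and everything it calls must be notrace:
 *
 *	static void notrace my_trace_callback(unsigned long ip,
 *					      unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_callback,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */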
2781
2782 /**
2783  * unregister_ftrace_function - unregister a function for profiling.
2784  * @ops - ops structure that holds the function to unregister
2785  *
2786  * Unregister a function that was added to be called by ftrace profiling.
2787  */
2788 int unregister_ftrace_function(struct ftrace_ops *ops)
2789 {
2790         int ret;
2791
2792         mutex_lock(&ftrace_lock);
2793         ret = __unregister_ftrace_function(ops);
2794         ftrace_shutdown(0);
2795         mutex_unlock(&ftrace_lock);
2796
2797         return ret;
2798 }
2799
2800 int
2801 ftrace_enable_sysctl(struct ctl_table *table, int write,
2802                      struct file *file, void __user *buffer, size_t *lenp,
2803                      loff_t *ppos)
2804 {
2805         int ret;
2806
2807         if (unlikely(ftrace_disabled))
2808                 return -ENODEV;
2809
2810         mutex_lock(&ftrace_lock);
2811
2812         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
2813
2814         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2815                 goto out;
2816
2817         last_ftrace_enabled = ftrace_enabled;
2818
2819         if (ftrace_enabled) {
2820
2821                 ftrace_startup_sysctl();
2822
2823                 /* we are starting ftrace again */
2824                 if (ftrace_list != &ftrace_list_end) {
2825                         if (ftrace_list->next == &ftrace_list_end)
2826                                 ftrace_trace_function = ftrace_list->func;
2827                         else
2828                                 ftrace_trace_function = ftrace_list_func;
2829                 }
2830
2831         } else {
2832                 /* stopping ftrace calls (just send to ftrace_stub) */
2833                 ftrace_trace_function = ftrace_stub;
2834
2835                 ftrace_shutdown_sysctl();
2836         }
2837
2838  out:
2839         mutex_unlock(&ftrace_lock);
2840         return ret;
2841 }
2842
2843 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2844
2845 static atomic_t ftrace_graph_active;
2846 static struct notifier_block ftrace_suspend_notifier;
2847
2848 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2849 {
2850         return 0;
2851 }
2852
2853 /* The callbacks that hook a function */
2854 trace_func_graph_ret_t ftrace_graph_return =
2855                         (trace_func_graph_ret_t)ftrace_stub;
2856 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2857
2858 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2859 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2860 {
2861         int i;
2862         int ret = 0;
2863         unsigned long flags;
2864         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2865         struct task_struct *g, *t;
2866
2867         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2868                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2869                                         * sizeof(struct ftrace_ret_stack),
2870                                         GFP_KERNEL);
2871                 if (!ret_stack_list[i]) {
2872                         start = 0;
2873                         end = i;
2874                         ret = -ENOMEM;
2875                         goto free;
2876                 }
2877         }
2878
2879         read_lock_irqsave(&tasklist_lock, flags);
2880         do_each_thread(g, t) {
2881                 if (start == end) {
2882                         ret = -EAGAIN;
2883                         goto unlock;
2884                 }
2885
2886                 if (t->ret_stack == NULL) {
2887                         t->curr_ret_stack = -1;
2888                         /* Make sure IRQs see the -1 first: */
2889                         barrier();
2890                         t->ret_stack = ret_stack_list[start++];
2891                         atomic_set(&t->tracing_graph_pause, 0);
2892                         atomic_set(&t->trace_overrun, 0);
2893                 }
2894         } while_each_thread(g, t);
2895
2896 unlock:
2897         read_unlock_irqrestore(&tasklist_lock, flags);
2898 free:
2899         for (i = start; i < end; i++)
2900                 kfree(ret_stack_list[i]);
2901         return ret;
2902 }
2903
2904 static void
2905 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
2906                                 struct task_struct *next)
2907 {
2908         unsigned long long timestamp;
2909         int index;
2910
2911         /*
2912          * Does the user want to count the time a function was asleep?
2913          * If so, do not update the time stamps.
2914          */
2915         if (trace_flags & TRACE_ITER_SLEEP_TIME)
2916                 return;
2917
2918         timestamp = trace_clock_local();
2919
2920         prev->ftrace_timestamp = timestamp;
2921
2922         /* only process tasks that we timestamped */
2923         if (!next->ftrace_timestamp)
2924                 return;
2925
2926         /*
2927          * Update all the counters in next to make up for the
2928          * time next was sleeping.
2929          */
2930         timestamp -= next->ftrace_timestamp;
2931
2932         for (index = next->curr_ret_stack; index >= 0; index--)
2933                 next->ret_stack[index].calltime += timestamp;
2934 }
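/*
 * Worked example of the loop above: if next was scheduled out for 3ms,
 * each calltime on its pending return stack is pushed 3ms forward, so a
 * duration later derived from (return time - calltime) excludes the
 * sleep, unless TRACE_ITER_SLEEP_TIME asked for it to be counted.
 */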
2935
2936 /* Allocate a return stack for each task */
2937 static int start_graph_tracing(void)
2938 {
2939         struct ftrace_ret_stack **ret_stack_list;
2940         int ret, cpu;
2941
2942         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2943                                 sizeof(struct ftrace_ret_stack *),
2944                                 GFP_KERNEL);
2945
2946         if (!ret_stack_list)
2947                 return -ENOMEM;
2948
2949         /* The ret_stack of each cpu's idle task will never be freed */
2950         for_each_online_cpu(cpu)
2951                 ftrace_graph_init_task(idle_task(cpu));
2952
2953         do {
2954                 ret = alloc_retstack_tasklist(ret_stack_list);
2955         } while (ret == -EAGAIN);
2956
2957         if (!ret) {
2958                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
2959                 if (ret)
2960                         pr_info("ftrace_graph: Couldn't activate tracepoint"
2961                                 " probe to kernel_sched_switch\n");
2962         }
2963
2964         kfree(ret_stack_list);
2965         return ret;
2966 }
2967
2968 /*
2969  * Hibernation protection.
2970  * The state of the current task is too unstable during
2971  * suspend/restore to disk. We want to protect against that.
2972  */
2973 static int
2974 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2975                                                         void *unused)
2976 {
2977         switch (state) {
2978         case PM_HIBERNATION_PREPARE:
2979                 pause_graph_tracing();
2980                 break;
2981
2982         case PM_POST_HIBERNATION:
2983                 unpause_graph_tracing();
2984                 break;
2985         }
2986         return NOTIFY_DONE;
2987 }
2988
2989 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2990                         trace_func_graph_ent_t entryfunc)
2991 {
2992         int ret = 0;
2993
2994         mutex_lock(&ftrace_lock);
2995
2996         /* we currently allow only one tracer registered at a time */
2997         if (atomic_read(&ftrace_graph_active)) {
2998                 ret = -EBUSY;
2999                 goto out;
3000         }
3001
3002         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3003         register_pm_notifier(&ftrace_suspend_notifier);
3004
3005         atomic_inc(&ftrace_graph_active);
3006         ret = start_graph_tracing();
3007         if (ret) {
3008                 atomic_dec(&ftrace_graph_active);
3009                 goto out;
3010         }
3011
3012         ftrace_graph_return = retfunc;
3013         ftrace_graph_entry = entryfunc;
3014
3015         ftrace_startup(FTRACE_START_FUNC_RET);
3016
3017 out:
3018         mutex_unlock(&ftrace_lock);
3019         return ret;
3020 }
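/*
 * Sketch of a graph-tracer client (placeholder names). The entry handler
 * returns non-zero to trace a call and 0 to skip it, mirroring
 * ftrace_graph_entry_stub() above; the return handler is invoked at the
 * matching function exit:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */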
3021
3022 void unregister_ftrace_graph(void)
3023 {
3024         mutex_lock(&ftrace_lock);
3025
3026         atomic_dec(&ftrace_graph_active);
3027         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3028         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3029         ftrace_graph_entry = ftrace_graph_entry_stub;
3030         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3031         unregister_pm_notifier(&ftrace_suspend_notifier);
3032
3033         mutex_unlock(&ftrace_lock);
3034 }
3035
3036 /* Allocate a return stack for newly created task */
3037 void ftrace_graph_init_task(struct task_struct *t)
3038 {
3039         if (atomic_read(&ftrace_graph_active)) {
3040                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3041                                 * sizeof(struct ftrace_ret_stack),
3042                                 GFP_KERNEL);
3043                 if (!t->ret_stack)
3044                         return;
3045                 t->curr_ret_stack = -1;
3046                 atomic_set(&t->tracing_graph_pause, 0);
3047                 atomic_set(&t->trace_overrun, 0);
3048                 t->ftrace_timestamp = 0;
3049         } else
3050                 t->ret_stack = NULL;
3051 }
3052
3053 void ftrace_graph_exit_task(struct task_struct *t)
3054 {
3055         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3056
3057         t->ret_stack = NULL;
3058         /* NULL must become visible to IRQs before we free it: */
3059         barrier();
3060
3061         kfree(ret_stack);
3062 }
3063
3064 void ftrace_graph_stop(void)
3065 {
3066         ftrace_stop();
3067 }
3068 #endif
3069