function-graph: Fix unused reference to ftrace_set_func()
[safe/jmp/linux-2.6] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/events/sched.h>
33
34 #include <asm/ftrace.h>
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
40 #define FTRACE_WARN_ON(cond)                    \
41         do {                                    \
42                 if (WARN_ON(cond))              \
43                         ftrace_kill();          \
44         } while (0)
45
46 #define FTRACE_WARN_ON_ONCE(cond)               \
47         do {                                    \
48                 if (WARN_ON_ONCE(cond))         \
49                         ftrace_kill();          \
50         } while (0)
51
52 /* hash bits for specific function selection */
53 #define FTRACE_HASH_BITS 7
54 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
55
56 /* ftrace_enabled is a method to turn ftrace on or off */
57 int ftrace_enabled __read_mostly;
58 static int last_ftrace_enabled;
59
60 /* Quick disabling of function tracer. */
61 int function_trace_stop;
62
63 /* List for set_ftrace_pid's pids. */
64 LIST_HEAD(ftrace_pids);
65 struct ftrace_pid {
66         struct list_head list;
67         struct pid *pid;
68 };
69
70 /*
71  * ftrace_disabled is set when an anomaly is discovered.
72  * ftrace_disabled is much stronger than ftrace_enabled.
73  */
74 static int ftrace_disabled __read_mostly;
75
76 static DEFINE_MUTEX(ftrace_lock);
77
78 static struct ftrace_ops ftrace_list_end __read_mostly =
79 {
80         .func           = ftrace_stub,
81 };
82
83 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
84 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
85 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
86 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
87
88 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
89 {
90         struct ftrace_ops *op = ftrace_list;
91
92         /* in case someone actually ports this to alpha! */
93         read_barrier_depends();
94
95         while (op != &ftrace_list_end) {
96                 /* silly alpha */
97                 read_barrier_depends();
98                 op->func(ip, parent_ip);
99                 op = op->next;
100         };
101 }
102
103 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
104 {
105         if (!test_tsk_trace_trace(current))
106                 return;
107
108         ftrace_pid_function(ip, parent_ip);
109 }
110
111 static void set_ftrace_pid_function(ftrace_func_t func)
112 {
113         /* do not set ftrace_pid_function to itself! */
114         if (func != ftrace_pid_func)
115                 ftrace_pid_function = func;
116 }
117
118 /**
119  * clear_ftrace_function - reset the ftrace function
120  *
121  * This NULLs the ftrace function and in essence stops
122  * tracing.  There may be lag
123  */
124 void clear_ftrace_function(void)
125 {
126         ftrace_trace_function = ftrace_stub;
127         __ftrace_trace_function = ftrace_stub;
128         ftrace_pid_function = ftrace_stub;
129 }
130
131 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
132 /*
133  * For those archs that do not test ftrace_trace_stop in their
134  * mcount call site, we need to do it from C.
135  */
136 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
137 {
138         if (function_trace_stop)
139                 return;
140
141         __ftrace_trace_function(ip, parent_ip);
142 }
143 #endif
144
145 static int __register_ftrace_function(struct ftrace_ops *ops)
146 {
147         ops->next = ftrace_list;
148         /*
149          * We are entering ops into the ftrace_list but another
150          * CPU might be walking that list. We need to make sure
151          * the ops->next pointer is valid before another CPU sees
152          * the ops pointer included into the ftrace_list.
153          */
154         smp_wmb();
155         ftrace_list = ops;
156
157         if (ftrace_enabled) {
158                 ftrace_func_t func;
159
160                 if (ops->next == &ftrace_list_end)
161                         func = ops->func;
162                 else
163                         func = ftrace_list_func;
164
165                 if (!list_empty(&ftrace_pids)) {
166                         set_ftrace_pid_function(func);
167                         func = ftrace_pid_func;
168                 }
169
170                 /*
171                  * For one func, simply call it directly.
172                  * For more than one func, call the chain.
173                  */
174 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
175                 ftrace_trace_function = func;
176 #else
177                 __ftrace_trace_function = func;
178                 ftrace_trace_function = ftrace_test_stop_func;
179 #endif
180         }
181
182         return 0;
183 }
184
185 static int __unregister_ftrace_function(struct ftrace_ops *ops)
186 {
187         struct ftrace_ops **p;
188
189         /*
190          * If we are removing the last function, then simply point
191          * to the ftrace_stub.
192          */
193         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
194                 ftrace_trace_function = ftrace_stub;
195                 ftrace_list = &ftrace_list_end;
196                 return 0;
197         }
198
199         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
200                 if (*p == ops)
201                         break;
202
203         if (*p != ops)
204                 return -1;
205
206         *p = (*p)->next;
207
208         if (ftrace_enabled) {
209                 /* If we only have one func left, then call that directly */
210                 if (ftrace_list->next == &ftrace_list_end) {
211                         ftrace_func_t func = ftrace_list->func;
212
213                         if (!list_empty(&ftrace_pids)) {
214                                 set_ftrace_pid_function(func);
215                                 func = ftrace_pid_func;
216                         }
217 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218                         ftrace_trace_function = func;
219 #else
220                         __ftrace_trace_function = func;
221 #endif
222                 }
223         }
224
225         return 0;
226 }
227
228 static void ftrace_update_pid_func(void)
229 {
230         ftrace_func_t func;
231
232         if (ftrace_trace_function == ftrace_stub)
233                 return;
234
235 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
236         func = ftrace_trace_function;
237 #else
238         func = __ftrace_trace_function;
239 #endif
240
241         if (!list_empty(&ftrace_pids)) {
242                 set_ftrace_pid_function(func);
243                 func = ftrace_pid_func;
244         } else {
245                 if (func == ftrace_pid_func)
246                         func = ftrace_pid_function;
247         }
248
249 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
250         ftrace_trace_function = func;
251 #else
252         __ftrace_trace_function = func;
253 #endif
254 }
255
256 #ifdef CONFIG_FUNCTION_PROFILER
257 struct ftrace_profile {
258         struct hlist_node               node;
259         unsigned long                   ip;
260         unsigned long                   counter;
261 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
262         unsigned long long              time;
263 #endif
264 };
265
266 struct ftrace_profile_page {
267         struct ftrace_profile_page      *next;
268         unsigned long                   index;
269         struct ftrace_profile           records[];
270 };
271
272 struct ftrace_profile_stat {
273         atomic_t                        disabled;
274         struct hlist_head               *hash;
275         struct ftrace_profile_page      *pages;
276         struct ftrace_profile_page      *start;
277         struct tracer_stat              stat;
278 };
279
280 #define PROFILE_RECORDS_SIZE                                            \
281         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
282
283 #define PROFILES_PER_PAGE                                       \
284         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
285
286 static int ftrace_profile_bits __read_mostly;
287 static int ftrace_profile_enabled __read_mostly;
288
289 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
290 static DEFINE_MUTEX(ftrace_profile_lock);
291
292 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
293
294 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
295
296 static void *
297 function_stat_next(void *v, int idx)
298 {
299         struct ftrace_profile *rec = v;
300         struct ftrace_profile_page *pg;
301
302         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
303
304  again:
305         if (idx != 0)
306                 rec++;
307
308         if ((void *)rec >= (void *)&pg->records[pg->index]) {
309                 pg = pg->next;
310                 if (!pg)
311                         return NULL;
312                 rec = &pg->records[0];
313                 if (!rec->counter)
314                         goto again;
315         }
316
317         return rec;
318 }
319
320 static void *function_stat_start(struct tracer_stat *trace)
321 {
322         struct ftrace_profile_stat *stat =
323                 container_of(trace, struct ftrace_profile_stat, stat);
324
325         if (!stat || !stat->start)
326                 return NULL;
327
328         return function_stat_next(&stat->start->records[0], 0);
329 }
330
331 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
332 /* function graph compares on total time */
333 static int function_stat_cmp(void *p1, void *p2)
334 {
335         struct ftrace_profile *a = p1;
336         struct ftrace_profile *b = p2;
337
338         if (a->time < b->time)
339                 return -1;
340         if (a->time > b->time)
341                 return 1;
342         else
343                 return 0;
344 }
345 #else
346 /* not function graph compares against hits */
347 static int function_stat_cmp(void *p1, void *p2)
348 {
349         struct ftrace_profile *a = p1;
350         struct ftrace_profile *b = p2;
351
352         if (a->counter < b->counter)
353                 return -1;
354         if (a->counter > b->counter)
355                 return 1;
356         else
357                 return 0;
358 }
359 #endif
360
361 static int function_stat_headers(struct seq_file *m)
362 {
363 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
364         seq_printf(m, "  Function                               "
365                    "Hit    Time            Avg\n"
366                       "  --------                               "
367                    "---    ----            ---\n");
368 #else
369         seq_printf(m, "  Function                               Hit\n"
370                       "  --------                               ---\n");
371 #endif
372         return 0;
373 }
374
375 static int function_stat_show(struct seq_file *m, void *v)
376 {
377         struct ftrace_profile *rec = v;
378         char str[KSYM_SYMBOL_LEN];
379 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
380         static DEFINE_MUTEX(mutex);
381         static struct trace_seq s;
382         unsigned long long avg;
383 #endif
384
385         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
386         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
387
388 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
389         seq_printf(m, "    ");
390         avg = rec->time;
391         do_div(avg, rec->counter);
392
393         mutex_lock(&mutex);
394         trace_seq_init(&s);
395         trace_print_graph_duration(rec->time, &s);
396         trace_seq_puts(&s, "    ");
397         trace_print_graph_duration(avg, &s);
398         trace_print_seq(m, &s);
399         mutex_unlock(&mutex);
400 #endif
401         seq_putc(m, '\n');
402
403         return 0;
404 }
405
406 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
407 {
408         struct ftrace_profile_page *pg;
409
410         pg = stat->pages = stat->start;
411
412         while (pg) {
413                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
414                 pg->index = 0;
415                 pg = pg->next;
416         }
417
418         memset(stat->hash, 0,
419                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
420 }
421
422 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
423 {
424         struct ftrace_profile_page *pg;
425         int functions;
426         int pages;
427         int i;
428
429         /* If we already allocated, do nothing */
430         if (stat->pages)
431                 return 0;
432
433         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
434         if (!stat->pages)
435                 return -ENOMEM;
436
437 #ifdef CONFIG_DYNAMIC_FTRACE
438         functions = ftrace_update_tot_cnt;
439 #else
440         /*
441          * We do not know the number of functions that exist because
442          * dynamic tracing is what counts them. With past experience
443          * we have around 20K functions. That should be more than enough.
444          * It is highly unlikely we will execute every function in
445          * the kernel.
446          */
447         functions = 20000;
448 #endif
449
450         pg = stat->start = stat->pages;
451
452         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
453
454         for (i = 0; i < pages; i++) {
455                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
456                 if (!pg->next)
457                         goto out_free;
458                 pg = pg->next;
459         }
460
461         return 0;
462
463  out_free:
464         pg = stat->start;
465         while (pg) {
466                 unsigned long tmp = (unsigned long)pg;
467
468                 pg = pg->next;
469                 free_page(tmp);
470         }
471
472         free_page((unsigned long)stat->pages);
473         stat->pages = NULL;
474         stat->start = NULL;
475
476         return -ENOMEM;
477 }
478
479 static int ftrace_profile_init_cpu(int cpu)
480 {
481         struct ftrace_profile_stat *stat;
482         int size;
483
484         stat = &per_cpu(ftrace_profile_stats, cpu);
485
486         if (stat->hash) {
487                 /* If the profile is already created, simply reset it */
488                 ftrace_profile_reset(stat);
489                 return 0;
490         }
491
492         /*
493          * We are profiling all functions, but usually only a few thousand
494          * functions are hit. We'll make a hash of 1024 items.
495          */
496         size = FTRACE_PROFILE_HASH_SIZE;
497
498         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
499
500         if (!stat->hash)
501                 return -ENOMEM;
502
503         if (!ftrace_profile_bits) {
504                 size--;
505
506                 for (; size; size >>= 1)
507                         ftrace_profile_bits++;
508         }
509
510         /* Preallocate the function profiling pages */
511         if (ftrace_profile_pages_init(stat) < 0) {
512                 kfree(stat->hash);
513                 stat->hash = NULL;
514                 return -ENOMEM;
515         }
516
517         return 0;
518 }
519
520 static int ftrace_profile_init(void)
521 {
522         int cpu;
523         int ret = 0;
524
525         for_each_online_cpu(cpu) {
526                 ret = ftrace_profile_init_cpu(cpu);
527                 if (ret)
528                         break;
529         }
530
531         return ret;
532 }
533
534 /* interrupts must be disabled */
535 static struct ftrace_profile *
536 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
537 {
538         struct ftrace_profile *rec;
539         struct hlist_head *hhd;
540         struct hlist_node *n;
541         unsigned long key;
542
543         key = hash_long(ip, ftrace_profile_bits);
544         hhd = &stat->hash[key];
545
546         if (hlist_empty(hhd))
547                 return NULL;
548
549         hlist_for_each_entry_rcu(rec, n, hhd, node) {
550                 if (rec->ip == ip)
551                         return rec;
552         }
553
554         return NULL;
555 }
556
557 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
558                                struct ftrace_profile *rec)
559 {
560         unsigned long key;
561
562         key = hash_long(rec->ip, ftrace_profile_bits);
563         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
564 }
565
566 /*
567  * The memory is already allocated, this simply finds a new record to use.
568  */
569 static struct ftrace_profile *
570 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
571 {
572         struct ftrace_profile *rec = NULL;
573
574         /* prevent recursion (from NMIs) */
575         if (atomic_inc_return(&stat->disabled) != 1)
576                 goto out;
577
578         /*
579          * Try to find the function again since an NMI
580          * could have added it
581          */
582         rec = ftrace_find_profiled_func(stat, ip);
583         if (rec)
584                 goto out;
585
586         if (stat->pages->index == PROFILES_PER_PAGE) {
587                 if (!stat->pages->next)
588                         goto out;
589                 stat->pages = stat->pages->next;
590         }
591
592         rec = &stat->pages->records[stat->pages->index++];
593         rec->ip = ip;
594         ftrace_add_profile(stat, rec);
595
596  out:
597         atomic_dec(&stat->disabled);
598
599         return rec;
600 }
601
602 static void
603 function_profile_call(unsigned long ip, unsigned long parent_ip)
604 {
605         struct ftrace_profile_stat *stat;
606         struct ftrace_profile *rec;
607         unsigned long flags;
608
609         if (!ftrace_profile_enabled)
610                 return;
611
612         local_irq_save(flags);
613
614         stat = &__get_cpu_var(ftrace_profile_stats);
615         if (!stat->hash || !ftrace_profile_enabled)
616                 goto out;
617
618         rec = ftrace_find_profiled_func(stat, ip);
619         if (!rec) {
620                 rec = ftrace_profile_alloc(stat, ip);
621                 if (!rec)
622                         goto out;
623         }
624
625         rec->counter++;
626  out:
627         local_irq_restore(flags);
628 }
629
630 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
631 static int profile_graph_entry(struct ftrace_graph_ent *trace)
632 {
633         function_profile_call(trace->func, 0);
634         return 1;
635 }
636
637 static void profile_graph_return(struct ftrace_graph_ret *trace)
638 {
639         struct ftrace_profile_stat *stat;
640         unsigned long long calltime;
641         struct ftrace_profile *rec;
642         unsigned long flags;
643
644         local_irq_save(flags);
645         stat = &__get_cpu_var(ftrace_profile_stats);
646         if (!stat->hash || !ftrace_profile_enabled)
647                 goto out;
648
649         calltime = trace->rettime - trace->calltime;
650
651         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
652                 int index;
653
654                 index = trace->depth;
655
656                 /* Append this call time to the parent time to subtract */
657                 if (index)
658                         current->ret_stack[index - 1].subtime += calltime;
659
660                 if (current->ret_stack[index].subtime < calltime)
661                         calltime -= current->ret_stack[index].subtime;
662                 else
663                         calltime = 0;
664         }
665
666         rec = ftrace_find_profiled_func(stat, trace->func);
667         if (rec)
668                 rec->time += calltime;
669
670  out:
671         local_irq_restore(flags);
672 }
673
674 static int register_ftrace_profiler(void)
675 {
676         return register_ftrace_graph(&profile_graph_return,
677                                      &profile_graph_entry);
678 }
679
680 static void unregister_ftrace_profiler(void)
681 {
682         unregister_ftrace_graph();
683 }
684 #else
685 static struct ftrace_ops ftrace_profile_ops __read_mostly =
686 {
687         .func           = function_profile_call,
688 };
689
690 static int register_ftrace_profiler(void)
691 {
692         return register_ftrace_function(&ftrace_profile_ops);
693 }
694
695 static void unregister_ftrace_profiler(void)
696 {
697         unregister_ftrace_function(&ftrace_profile_ops);
698 }
699 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
700
701 static ssize_t
702 ftrace_profile_write(struct file *filp, const char __user *ubuf,
703                      size_t cnt, loff_t *ppos)
704 {
705         unsigned long val;
706         char buf[64];           /* big enough to hold a number */
707         int ret;
708
709         if (cnt >= sizeof(buf))
710                 return -EINVAL;
711
712         if (copy_from_user(&buf, ubuf, cnt))
713                 return -EFAULT;
714
715         buf[cnt] = 0;
716
717         ret = strict_strtoul(buf, 10, &val);
718         if (ret < 0)
719                 return ret;
720
721         val = !!val;
722
723         mutex_lock(&ftrace_profile_lock);
724         if (ftrace_profile_enabled ^ val) {
725                 if (val) {
726                         ret = ftrace_profile_init();
727                         if (ret < 0) {
728                                 cnt = ret;
729                                 goto out;
730                         }
731
732                         ret = register_ftrace_profiler();
733                         if (ret < 0) {
734                                 cnt = ret;
735                                 goto out;
736                         }
737                         ftrace_profile_enabled = 1;
738                 } else {
739                         ftrace_profile_enabled = 0;
740                         /*
741                          * unregister_ftrace_profiler calls stop_machine
742                          * so this acts like an synchronize_sched.
743                          */
744                         unregister_ftrace_profiler();
745                 }
746         }
747  out:
748         mutex_unlock(&ftrace_profile_lock);
749
750         *ppos += cnt;
751
752         return cnt;
753 }
754
755 static ssize_t
756 ftrace_profile_read(struct file *filp, char __user *ubuf,
757                      size_t cnt, loff_t *ppos)
758 {
759         char buf[64];           /* big enough to hold a number */
760         int r;
761
762         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
763         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
764 }
765
766 static const struct file_operations ftrace_profile_fops = {
767         .open           = tracing_open_generic,
768         .read           = ftrace_profile_read,
769         .write          = ftrace_profile_write,
770 };
771
772 /* used to initialize the real stat files */
773 static struct tracer_stat function_stats __initdata = {
774         .name           = "functions",
775         .stat_start     = function_stat_start,
776         .stat_next      = function_stat_next,
777         .stat_cmp       = function_stat_cmp,
778         .stat_headers   = function_stat_headers,
779         .stat_show      = function_stat_show
780 };
781
782 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
783 {
784         struct ftrace_profile_stat *stat;
785         struct dentry *entry;
786         char *name;
787         int ret;
788         int cpu;
789
790         for_each_possible_cpu(cpu) {
791                 stat = &per_cpu(ftrace_profile_stats, cpu);
792
793                 /* allocate enough for function name + cpu number */
794                 name = kmalloc(32, GFP_KERNEL);
795                 if (!name) {
796                         /*
797                          * The files created are permanent, if something happens
798                          * we still do not free memory.
799                          */
800                         WARN(1,
801                              "Could not allocate stat file for cpu %d\n",
802                              cpu);
803                         return;
804                 }
805                 stat->stat = function_stats;
806                 snprintf(name, 32, "function%d", cpu);
807                 stat->stat.name = name;
808                 ret = register_stat_tracer(&stat->stat);
809                 if (ret) {
810                         WARN(1,
811                              "Could not register function stat for cpu %d\n",
812                              cpu);
813                         kfree(name);
814                         return;
815                 }
816         }
817
818         entry = debugfs_create_file("function_profile_enabled", 0644,
819                                     d_tracer, NULL, &ftrace_profile_fops);
820         if (!entry)
821                 pr_warning("Could not create debugfs "
822                            "'function_profile_enabled' entry\n");
823 }
824
825 #else /* CONFIG_FUNCTION_PROFILER */
826 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
827 {
828 }
829 #endif /* CONFIG_FUNCTION_PROFILER */
830
831 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
832
833 #ifdef CONFIG_DYNAMIC_FTRACE
834
835 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
836 # error Dynamic ftrace depends on MCOUNT_RECORD
837 #endif
838
839 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
840
841 struct ftrace_func_probe {
842         struct hlist_node       node;
843         struct ftrace_probe_ops *ops;
844         unsigned long           flags;
845         unsigned long           ip;
846         void                    *data;
847         struct rcu_head         rcu;
848 };
849
850 enum {
851         FTRACE_ENABLE_CALLS             = (1 << 0),
852         FTRACE_DISABLE_CALLS            = (1 << 1),
853         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
854         FTRACE_ENABLE_MCOUNT            = (1 << 3),
855         FTRACE_DISABLE_MCOUNT           = (1 << 4),
856         FTRACE_START_FUNC_RET           = (1 << 5),
857         FTRACE_STOP_FUNC_RET            = (1 << 6),
858 };
859
860 static int ftrace_filtered;
861
862 static struct dyn_ftrace *ftrace_new_addrs;
863
864 static DEFINE_MUTEX(ftrace_regex_lock);
865
866 struct ftrace_page {
867         struct ftrace_page      *next;
868         int                     index;
869         struct dyn_ftrace       records[];
870 };
871
872 #define ENTRIES_PER_PAGE \
873   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
874
875 /* estimate from running different kernels */
876 #define NR_TO_INIT              10000
877
878 static struct ftrace_page       *ftrace_pages_start;
879 static struct ftrace_page       *ftrace_pages;
880
881 static struct dyn_ftrace *ftrace_free_records;
882
883 /*
884  * This is a double for. Do not use 'break' to break out of the loop,
885  * you must use a goto.
886  */
887 #define do_for_each_ftrace_rec(pg, rec)                                 \
888         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
889                 int _____i;                                             \
890                 for (_____i = 0; _____i < pg->index; _____i++) {        \
891                         rec = &pg->records[_____i];
892
893 #define while_for_each_ftrace_rec()             \
894                 }                               \
895         }
896
897 #ifdef CONFIG_KPROBES
898
899 static int frozen_record_count;
900
901 static inline void freeze_record(struct dyn_ftrace *rec)
902 {
903         if (!(rec->flags & FTRACE_FL_FROZEN)) {
904                 rec->flags |= FTRACE_FL_FROZEN;
905                 frozen_record_count++;
906         }
907 }
908
909 static inline void unfreeze_record(struct dyn_ftrace *rec)
910 {
911         if (rec->flags & FTRACE_FL_FROZEN) {
912                 rec->flags &= ~FTRACE_FL_FROZEN;
913                 frozen_record_count--;
914         }
915 }
916
917 static inline int record_frozen(struct dyn_ftrace *rec)
918 {
919         return rec->flags & FTRACE_FL_FROZEN;
920 }
921 #else
922 # define freeze_record(rec)                     ({ 0; })
923 # define unfreeze_record(rec)                   ({ 0; })
924 # define record_frozen(rec)                     ({ 0; })
925 #endif /* CONFIG_KPROBES */
926
927 static void ftrace_free_rec(struct dyn_ftrace *rec)
928 {
929         rec->freelist = ftrace_free_records;
930         ftrace_free_records = rec;
931         rec->flags |= FTRACE_FL_FREE;
932 }
933
934 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
935 {
936         struct dyn_ftrace *rec;
937
938         /* First check for freed records */
939         if (ftrace_free_records) {
940                 rec = ftrace_free_records;
941
942                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
943                         FTRACE_WARN_ON_ONCE(1);
944                         ftrace_free_records = NULL;
945                         return NULL;
946                 }
947
948                 ftrace_free_records = rec->freelist;
949                 memset(rec, 0, sizeof(*rec));
950                 return rec;
951         }
952
953         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
954                 if (!ftrace_pages->next) {
955                         /* allocate another page */
956                         ftrace_pages->next =
957                                 (void *)get_zeroed_page(GFP_KERNEL);
958                         if (!ftrace_pages->next)
959                                 return NULL;
960                 }
961                 ftrace_pages = ftrace_pages->next;
962         }
963
964         return &ftrace_pages->records[ftrace_pages->index++];
965 }
966
967 static struct dyn_ftrace *
968 ftrace_record_ip(unsigned long ip)
969 {
970         struct dyn_ftrace *rec;
971
972         if (ftrace_disabled)
973                 return NULL;
974
975         rec = ftrace_alloc_dyn_node(ip);
976         if (!rec)
977                 return NULL;
978
979         rec->ip = ip;
980         rec->newlist = ftrace_new_addrs;
981         ftrace_new_addrs = rec;
982
983         return rec;
984 }
985
986 static void print_ip_ins(const char *fmt, unsigned char *p)
987 {
988         int i;
989
990         printk(KERN_CONT "%s", fmt);
991
992         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
993                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
994 }
995
996 static void ftrace_bug(int failed, unsigned long ip)
997 {
998         switch (failed) {
999         case -EFAULT:
1000                 FTRACE_WARN_ON_ONCE(1);
1001                 pr_info("ftrace faulted on modifying ");
1002                 print_ip_sym(ip);
1003                 break;
1004         case -EINVAL:
1005                 FTRACE_WARN_ON_ONCE(1);
1006                 pr_info("ftrace failed to modify ");
1007                 print_ip_sym(ip);
1008                 print_ip_ins(" actual: ", (unsigned char *)ip);
1009                 printk(KERN_CONT "\n");
1010                 break;
1011         case -EPERM:
1012                 FTRACE_WARN_ON_ONCE(1);
1013                 pr_info("ftrace faulted on writing ");
1014                 print_ip_sym(ip);
1015                 break;
1016         default:
1017                 FTRACE_WARN_ON_ONCE(1);
1018                 pr_info("ftrace faulted on unknown error ");
1019                 print_ip_sym(ip);
1020         }
1021 }
1022
1023
1024 static int
1025 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1026 {
1027         unsigned long ftrace_addr;
1028         unsigned long flag = 0UL;
1029
1030         ftrace_addr = (unsigned long)FTRACE_ADDR;
1031
1032         /*
1033          * If this record is not to be traced or we want to disable it,
1034          * then disable it.
1035          *
1036          * If we want to enable it and filtering is off, then enable it.
1037          *
1038          * If we want to enable it and filtering is on, enable it only if
1039          * it's filtered
1040          */
1041         if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1042                 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1043                         flag = FTRACE_FL_ENABLED;
1044         }
1045
1046         /* If the state of this record hasn't changed, then do nothing */
1047         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1048                 return 0;
1049
1050         if (flag) {
1051                 rec->flags |= FTRACE_FL_ENABLED;
1052                 return ftrace_make_call(rec, ftrace_addr);
1053         }
1054
1055         rec->flags &= ~FTRACE_FL_ENABLED;
1056         return ftrace_make_nop(NULL, rec, ftrace_addr);
1057 }
1058
1059 static void ftrace_replace_code(int enable)
1060 {
1061         struct dyn_ftrace *rec;
1062         struct ftrace_page *pg;
1063         int failed;
1064
1065         do_for_each_ftrace_rec(pg, rec) {
1066                 /*
1067                  * Skip over free records, records that have
1068                  * failed and not converted.
1069                  */
1070                 if (rec->flags & FTRACE_FL_FREE ||
1071                     rec->flags & FTRACE_FL_FAILED ||
1072                     !(rec->flags & FTRACE_FL_CONVERTED))
1073                         continue;
1074
1075                 /* ignore updates to this record's mcount site */
1076                 if (get_kprobe((void *)rec->ip)) {
1077                         freeze_record(rec);
1078                         continue;
1079                 } else {
1080                         unfreeze_record(rec);
1081                 }
1082
1083                 failed = __ftrace_replace_code(rec, enable);
1084                 if (failed) {
1085                         rec->flags |= FTRACE_FL_FAILED;
1086                         ftrace_bug(failed, rec->ip);
1087                         /* Stop processing */
1088                         return;
1089                 }
1090         } while_for_each_ftrace_rec();
1091 }
1092
1093 static int
1094 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1095 {
1096         unsigned long ip;
1097         int ret;
1098
1099         ip = rec->ip;
1100
1101         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1102         if (ret) {
1103                 ftrace_bug(ret, ip);
1104                 rec->flags |= FTRACE_FL_FAILED;
1105                 return 0;
1106         }
1107         return 1;
1108 }
1109
1110 /*
1111  * archs can override this function if they must do something
1112  * before the modifying code is performed.
1113  */
1114 int __weak ftrace_arch_code_modify_prepare(void)
1115 {
1116         return 0;
1117 }
1118
1119 /*
1120  * archs can override this function if they must do something
1121  * after the modifying code is performed.
1122  */
1123 int __weak ftrace_arch_code_modify_post_process(void)
1124 {
1125         return 0;
1126 }
1127
1128 static int __ftrace_modify_code(void *data)
1129 {
1130         int *command = data;
1131
1132         if (*command & FTRACE_ENABLE_CALLS)
1133                 ftrace_replace_code(1);
1134         else if (*command & FTRACE_DISABLE_CALLS)
1135                 ftrace_replace_code(0);
1136
1137         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1138                 ftrace_update_ftrace_func(ftrace_trace_function);
1139
1140         if (*command & FTRACE_START_FUNC_RET)
1141                 ftrace_enable_ftrace_graph_caller();
1142         else if (*command & FTRACE_STOP_FUNC_RET)
1143                 ftrace_disable_ftrace_graph_caller();
1144
1145         return 0;
1146 }
1147
1148 static void ftrace_run_update_code(int command)
1149 {
1150         int ret;
1151
1152         ret = ftrace_arch_code_modify_prepare();
1153         FTRACE_WARN_ON(ret);
1154         if (ret)
1155                 return;
1156
1157         stop_machine(__ftrace_modify_code, &command, NULL);
1158
1159         ret = ftrace_arch_code_modify_post_process();
1160         FTRACE_WARN_ON(ret);
1161 }
1162
1163 static ftrace_func_t saved_ftrace_func;
1164 static int ftrace_start_up;
1165
1166 static void ftrace_startup_enable(int command)
1167 {
1168         if (saved_ftrace_func != ftrace_trace_function) {
1169                 saved_ftrace_func = ftrace_trace_function;
1170                 command |= FTRACE_UPDATE_TRACE_FUNC;
1171         }
1172
1173         if (!command || !ftrace_enabled)
1174                 return;
1175
1176         ftrace_run_update_code(command);
1177 }
1178
1179 static void ftrace_startup(int command)
1180 {
1181         if (unlikely(ftrace_disabled))
1182                 return;
1183
1184         ftrace_start_up++;
1185         command |= FTRACE_ENABLE_CALLS;
1186
1187         ftrace_startup_enable(command);
1188 }
1189
1190 static void ftrace_shutdown(int command)
1191 {
1192         if (unlikely(ftrace_disabled))
1193                 return;
1194
1195         ftrace_start_up--;
1196         /*
1197          * Just warn in case of unbalance, no need to kill ftrace, it's not
1198          * critical but the ftrace_call callers may be never nopped again after
1199          * further ftrace uses.
1200          */
1201         WARN_ON_ONCE(ftrace_start_up < 0);
1202
1203         if (!ftrace_start_up)
1204                 command |= FTRACE_DISABLE_CALLS;
1205
1206         if (saved_ftrace_func != ftrace_trace_function) {
1207                 saved_ftrace_func = ftrace_trace_function;
1208                 command |= FTRACE_UPDATE_TRACE_FUNC;
1209         }
1210
1211         if (!command || !ftrace_enabled)
1212                 return;
1213
1214         ftrace_run_update_code(command);
1215 }
1216
1217 static void ftrace_startup_sysctl(void)
1218 {
1219         int command = FTRACE_ENABLE_MCOUNT;
1220
1221         if (unlikely(ftrace_disabled))
1222                 return;
1223
1224         /* Force update next time */
1225         saved_ftrace_func = NULL;
1226         /* ftrace_start_up is true if we want ftrace running */
1227         if (ftrace_start_up)
1228                 command |= FTRACE_ENABLE_CALLS;
1229
1230         ftrace_run_update_code(command);
1231 }
1232
1233 static void ftrace_shutdown_sysctl(void)
1234 {
1235         int command = FTRACE_DISABLE_MCOUNT;
1236
1237         if (unlikely(ftrace_disabled))
1238                 return;
1239
1240         /* ftrace_start_up is true if ftrace is running */
1241         if (ftrace_start_up)
1242                 command |= FTRACE_DISABLE_CALLS;
1243
1244         ftrace_run_update_code(command);
1245 }
1246
1247 static cycle_t          ftrace_update_time;
1248 static unsigned long    ftrace_update_cnt;
1249 unsigned long           ftrace_update_tot_cnt;
1250
1251 static int ftrace_update_code(struct module *mod)
1252 {
1253         struct dyn_ftrace *p;
1254         cycle_t start, stop;
1255
1256         start = ftrace_now(raw_smp_processor_id());
1257         ftrace_update_cnt = 0;
1258
1259         while (ftrace_new_addrs) {
1260
1261                 /* If something went wrong, bail without enabling anything */
1262                 if (unlikely(ftrace_disabled))
1263                         return -1;
1264
1265                 p = ftrace_new_addrs;
1266                 ftrace_new_addrs = p->newlist;
1267                 p->flags = 0L;
1268
1269                 /*
1270                  * Do the initial record convertion from mcount jump
1271                  * to the NOP instructions.
1272                  */
1273                 if (!ftrace_code_disable(mod, p)) {
1274                         ftrace_free_rec(p);
1275                         continue;
1276                 }
1277
1278                 p->flags |= FTRACE_FL_CONVERTED;
1279                 ftrace_update_cnt++;
1280
1281                 /*
1282                  * If the tracing is enabled, go ahead and enable the record.
1283                  *
1284                  * The reason not to enable the record immediatelly is the
1285                  * inherent check of ftrace_make_nop/ftrace_make_call for
1286                  * correct previous instructions.  Making first the NOP
1287                  * conversion puts the module to the correct state, thus
1288                  * passing the ftrace_make_call check.
1289                  */
1290                 if (ftrace_start_up) {
1291                         int failed = __ftrace_replace_code(p, 1);
1292                         if (failed) {
1293                                 ftrace_bug(failed, p->ip);
1294                                 ftrace_free_rec(p);
1295                         }
1296                 }
1297         }
1298
1299         stop = ftrace_now(raw_smp_processor_id());
1300         ftrace_update_time = stop - start;
1301         ftrace_update_tot_cnt += ftrace_update_cnt;
1302
1303         return 0;
1304 }
1305
1306 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1307 {
1308         struct ftrace_page *pg;
1309         int cnt;
1310         int i;
1311
1312         /* allocate a few pages */
1313         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1314         if (!ftrace_pages_start)
1315                 return -1;
1316
1317         /*
1318          * Allocate a few more pages.
1319          *
1320          * TODO: have some parser search vmlinux before
1321          *   final linking to find all calls to ftrace.
1322          *   Then we can:
1323          *    a) know how many pages to allocate.
1324          *     and/or
1325          *    b) set up the table then.
1326          *
1327          *  The dynamic code is still necessary for
1328          *  modules.
1329          */
1330
1331         pg = ftrace_pages = ftrace_pages_start;
1332
1333         cnt = num_to_init / ENTRIES_PER_PAGE;
1334         pr_info("ftrace: allocating %ld entries in %d pages\n",
1335                 num_to_init, cnt + 1);
1336
1337         for (i = 0; i < cnt; i++) {
1338                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1339
1340                 /* If we fail, we'll try later anyway */
1341                 if (!pg->next)
1342                         break;
1343
1344                 pg = pg->next;
1345         }
1346
1347         return 0;
1348 }
1349
1350 enum {
1351         FTRACE_ITER_FILTER      = (1 << 0),
1352         FTRACE_ITER_NOTRACE     = (1 << 1),
1353         FTRACE_ITER_FAILURES    = (1 << 2),
1354         FTRACE_ITER_PRINTALL    = (1 << 3),
1355         FTRACE_ITER_HASH        = (1 << 4),
1356 };
1357
1358 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1359
1360 struct ftrace_iterator {
1361         struct ftrace_page      *pg;
1362         int                     hidx;
1363         int                     idx;
1364         unsigned                flags;
1365         struct trace_parser     parser;
1366 };
1367
1368 static void *
1369 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1370 {
1371         struct ftrace_iterator *iter = m->private;
1372         struct hlist_node *hnd = v;
1373         struct hlist_head *hhd;
1374
1375         WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1376
1377         (*pos)++;
1378
1379  retry:
1380         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1381                 return NULL;
1382
1383         hhd = &ftrace_func_hash[iter->hidx];
1384
1385         if (hlist_empty(hhd)) {
1386                 iter->hidx++;
1387                 hnd = NULL;
1388                 goto retry;
1389         }
1390
1391         if (!hnd)
1392                 hnd = hhd->first;
1393         else {
1394                 hnd = hnd->next;
1395                 if (!hnd) {
1396                         iter->hidx++;
1397                         goto retry;
1398                 }
1399         }
1400
1401         return hnd;
1402 }
1403
1404 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1405 {
1406         struct ftrace_iterator *iter = m->private;
1407         void *p = NULL;
1408         loff_t l;
1409
1410         if (!(iter->flags & FTRACE_ITER_HASH))
1411                 *pos = 0;
1412
1413         iter->flags |= FTRACE_ITER_HASH;
1414
1415         iter->hidx = 0;
1416         for (l = 0; l <= *pos; ) {
1417                 p = t_hash_next(m, p, &l);
1418                 if (!p)
1419                         break;
1420         }
1421         return p;
1422 }
1423
1424 static int t_hash_show(struct seq_file *m, void *v)
1425 {
1426         struct ftrace_func_probe *rec;
1427         struct hlist_node *hnd = v;
1428
1429         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1430
1431         if (rec->ops->print)
1432                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1433
1434         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1435
1436         if (rec->data)
1437                 seq_printf(m, ":%p", rec->data);
1438         seq_putc(m, '\n');
1439
1440         return 0;
1441 }
1442
1443 static void *
1444 t_next(struct seq_file *m, void *v, loff_t *pos)
1445 {
1446         struct ftrace_iterator *iter = m->private;
1447         struct dyn_ftrace *rec = NULL;
1448
1449         if (iter->flags & FTRACE_ITER_HASH)
1450                 return t_hash_next(m, v, pos);
1451
1452         (*pos)++;
1453
1454         if (iter->flags & FTRACE_ITER_PRINTALL)
1455                 return NULL;
1456
1457  retry:
1458         if (iter->idx >= iter->pg->index) {
1459                 if (iter->pg->next) {
1460                         iter->pg = iter->pg->next;
1461                         iter->idx = 0;
1462                         goto retry;
1463                 }
1464         } else {
1465                 rec = &iter->pg->records[iter->idx++];
1466                 if ((rec->flags & FTRACE_FL_FREE) ||
1467
1468                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
1469                      (rec->flags & FTRACE_FL_FAILED)) ||
1470
1471                     ((iter->flags & FTRACE_ITER_FAILURES) &&
1472                      !(rec->flags & FTRACE_FL_FAILED)) ||
1473
1474                     ((iter->flags & FTRACE_ITER_FILTER) &&
1475                      !(rec->flags & FTRACE_FL_FILTER)) ||
1476
1477                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1478                      !(rec->flags & FTRACE_FL_NOTRACE))) {
1479                         rec = NULL;
1480                         goto retry;
1481                 }
1482         }
1483
1484         return rec;
1485 }
1486
1487 static void *t_start(struct seq_file *m, loff_t *pos)
1488 {
1489         struct ftrace_iterator *iter = m->private;
1490         void *p = NULL;
1491         loff_t l;
1492
1493         mutex_lock(&ftrace_lock);
1494         /*
1495          * For set_ftrace_filter reading, if we have the filter
1496          * off, we can short cut and just print out that all
1497          * functions are enabled.
1498          */
1499         if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1500                 if (*pos > 0)
1501                         return t_hash_start(m, pos);
1502                 iter->flags |= FTRACE_ITER_PRINTALL;
1503                 return iter;
1504         }
1505
1506         if (iter->flags & FTRACE_ITER_HASH)
1507                 return t_hash_start(m, pos);
1508
1509         iter->pg = ftrace_pages_start;
1510         iter->idx = 0;
1511         for (l = 0; l <= *pos; ) {
1512                 p = t_next(m, p, &l);
1513                 if (!p)
1514                         break;
1515         }
1516
1517         if (!p && iter->flags & FTRACE_ITER_FILTER)
1518                 return t_hash_start(m, pos);
1519
1520         return p;
1521 }
1522
1523 static void t_stop(struct seq_file *m, void *p)
1524 {
1525         mutex_unlock(&ftrace_lock);
1526 }
1527
1528 static int t_show(struct seq_file *m, void *v)
1529 {
1530         struct ftrace_iterator *iter = m->private;
1531         struct dyn_ftrace *rec = v;
1532
1533         if (iter->flags & FTRACE_ITER_HASH)
1534                 return t_hash_show(m, v);
1535
1536         if (iter->flags & FTRACE_ITER_PRINTALL) {
1537                 seq_printf(m, "#### all functions enabled ####\n");
1538                 return 0;
1539         }
1540
1541         if (!rec)
1542                 return 0;
1543
1544         seq_printf(m, "%ps\n", (void *)rec->ip);
1545
1546         return 0;
1547 }
1548
1549 static const struct seq_operations show_ftrace_seq_ops = {
1550         .start = t_start,
1551         .next = t_next,
1552         .stop = t_stop,
1553         .show = t_show,
1554 };
1555
1556 static int
1557 ftrace_avail_open(struct inode *inode, struct file *file)
1558 {
1559         struct ftrace_iterator *iter;
1560         int ret;
1561
1562         if (unlikely(ftrace_disabled))
1563                 return -ENODEV;
1564
1565         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1566         if (!iter)
1567                 return -ENOMEM;
1568
1569         iter->pg = ftrace_pages_start;
1570
1571         ret = seq_open(file, &show_ftrace_seq_ops);
1572         if (!ret) {
1573                 struct seq_file *m = file->private_data;
1574
1575                 m->private = iter;
1576         } else {
1577                 kfree(iter);
1578         }
1579
1580         return ret;
1581 }
1582
1583 static int
1584 ftrace_failures_open(struct inode *inode, struct file *file)
1585 {
1586         int ret;
1587         struct seq_file *m;
1588         struct ftrace_iterator *iter;
1589
1590         ret = ftrace_avail_open(inode, file);
1591         if (!ret) {
1592                 m = (struct seq_file *)file->private_data;
1593                 iter = (struct ftrace_iterator *)m->private;
1594                 iter->flags = FTRACE_ITER_FAILURES;
1595         }
1596
1597         return ret;
1598 }
1599
1600
1601 static void ftrace_filter_reset(int enable)
1602 {
1603         struct ftrace_page *pg;
1604         struct dyn_ftrace *rec;
1605         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1606
1607         mutex_lock(&ftrace_lock);
1608         if (enable)
1609                 ftrace_filtered = 0;
1610         do_for_each_ftrace_rec(pg, rec) {
1611                 if (rec->flags & FTRACE_FL_FAILED)
1612                         continue;
1613                 rec->flags &= ~type;
1614         } while_for_each_ftrace_rec();
1615         mutex_unlock(&ftrace_lock);
1616 }
1617
1618 static int
1619 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1620 {
1621         struct ftrace_iterator *iter;
1622         int ret = 0;
1623
1624         if (unlikely(ftrace_disabled))
1625                 return -ENODEV;
1626
1627         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1628         if (!iter)
1629                 return -ENOMEM;
1630
1631         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1632                 kfree(iter);
1633                 return -ENOMEM;
1634         }
1635
1636         mutex_lock(&ftrace_regex_lock);
1637         if ((file->f_mode & FMODE_WRITE) &&
1638             (file->f_flags & O_TRUNC))
1639                 ftrace_filter_reset(enable);
1640
1641         if (file->f_mode & FMODE_READ) {
1642                 iter->pg = ftrace_pages_start;
1643                 iter->flags = enable ? FTRACE_ITER_FILTER :
1644                         FTRACE_ITER_NOTRACE;
1645
1646                 ret = seq_open(file, &show_ftrace_seq_ops);
1647                 if (!ret) {
1648                         struct seq_file *m = file->private_data;
1649                         m->private = iter;
1650                 } else {
1651                         trace_parser_put(&iter->parser);
1652                         kfree(iter);
1653                 }
1654         } else
1655                 file->private_data = iter;
1656         mutex_unlock(&ftrace_regex_lock);
1657
1658         return ret;
1659 }
1660
1661 static int
1662 ftrace_filter_open(struct inode *inode, struct file *file)
1663 {
1664         return ftrace_regex_open(inode, file, 1);
1665 }
1666
1667 static int
1668 ftrace_notrace_open(struct inode *inode, struct file *file)
1669 {
1670         return ftrace_regex_open(inode, file, 0);
1671 }
1672
1673 static loff_t
1674 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1675 {
1676         loff_t ret;
1677
1678         if (file->f_mode & FMODE_READ)
1679                 ret = seq_lseek(file, offset, origin);
1680         else
1681                 file->f_pos = ret = 1;
1682
1683         return ret;
1684 }
1685
1686 static int ftrace_match(char *str, char *regex, int len, int type)
1687 {
1688         int matched = 0;
1689         int slen;
1690
1691         switch (type) {
1692         case MATCH_FULL:
1693                 if (strcmp(str, regex) == 0)
1694                         matched = 1;
1695                 break;
1696         case MATCH_FRONT_ONLY:
1697                 if (strncmp(str, regex, len) == 0)
1698                         matched = 1;
1699                 break;
1700         case MATCH_MIDDLE_ONLY:
1701                 if (strstr(str, regex))
1702                         matched = 1;
1703                 break;
1704         case MATCH_END_ONLY:
1705                 slen = strlen(str);
1706                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1707                         matched = 1;
1708                 break;
1709         }
1710
1711         return matched;
1712 }
1713
1714 static int
1715 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1716 {
1717         char str[KSYM_SYMBOL_LEN];
1718
1719         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1720         return ftrace_match(str, regex, len, type);
1721 }
1722
1723 static int ftrace_match_records(char *buff, int len, int enable)
1724 {
1725         unsigned int search_len;
1726         struct ftrace_page *pg;
1727         struct dyn_ftrace *rec;
1728         unsigned long flag;
1729         char *search;
1730         int type;
1731         int not;
1732         int found = 0;
1733
1734         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1735         type = filter_parse_regex(buff, len, &search, &not);
1736
1737         search_len = strlen(search);
1738
1739         mutex_lock(&ftrace_lock);
1740         do_for_each_ftrace_rec(pg, rec) {
1741
1742                 if (rec->flags & FTRACE_FL_FAILED)
1743                         continue;
1744
1745                 if (ftrace_match_record(rec, search, search_len, type)) {
1746                         if (not)
1747                                 rec->flags &= ~flag;
1748                         else
1749                                 rec->flags |= flag;
1750                         found = 1;
1751                 }
1752                 /*
1753                  * Only enable filtering if we have a function that
1754                  * is filtered on.
1755                  */
1756                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1757                         ftrace_filtered = 1;
1758         } while_for_each_ftrace_rec();
1759         mutex_unlock(&ftrace_lock);
1760
1761         return found;
1762 }
1763
1764 static int
1765 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1766                            char *regex, int len, int type)
1767 {
1768         char str[KSYM_SYMBOL_LEN];
1769         char *modname;
1770
1771         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1772
1773         if (!modname || strcmp(modname, mod))
1774                 return 0;
1775
1776         /* blank search means to match all funcs in the mod */
1777         if (len)
1778                 return ftrace_match(str, regex, len, type);
1779         else
1780                 return 1;
1781 }
1782
1783 static int ftrace_match_module_records(char *buff, char *mod, int enable)
1784 {
1785         unsigned search_len = 0;
1786         struct ftrace_page *pg;
1787         struct dyn_ftrace *rec;
1788         int type = MATCH_FULL;
1789         char *search = buff;
1790         unsigned long flag;
1791         int not = 0;
1792         int found = 0;
1793
1794         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1795
1796         /* blank or '*' mean the same */
1797         if (strcmp(buff, "*") == 0)
1798                 buff[0] = 0;
1799
1800         /* handle the case of 'dont filter this module' */
1801         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1802                 buff[0] = 0;
1803                 not = 1;
1804         }
1805
1806         if (strlen(buff)) {
1807                 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1808                 search_len = strlen(search);
1809         }
1810
1811         mutex_lock(&ftrace_lock);
1812         do_for_each_ftrace_rec(pg, rec) {
1813
1814                 if (rec->flags & FTRACE_FL_FAILED)
1815                         continue;
1816
1817                 if (ftrace_match_module_record(rec, mod,
1818                                                search, search_len, type)) {
1819                         if (not)
1820                                 rec->flags &= ~flag;
1821                         else
1822                                 rec->flags |= flag;
1823                         found = 1;
1824                 }
1825                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1826                         ftrace_filtered = 1;
1827
1828         } while_for_each_ftrace_rec();
1829         mutex_unlock(&ftrace_lock);
1830
1831         return found;
1832 }
1833
1834 /*
1835  * We register the module command as a template to show others how
1836  * to register the a command as well.
1837  */
1838
1839 static int
1840 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1841 {
1842         char *mod;
1843
1844         /*
1845          * cmd == 'mod' because we only registered this func
1846          * for the 'mod' ftrace_func_command.
1847          * But if you register one func with multiple commands,
1848          * you can tell which command was used by the cmd
1849          * parameter.
1850          */
1851
1852         /* we must have a module name */
1853         if (!param)
1854                 return -EINVAL;
1855
1856         mod = strsep(&param, ":");
1857         if (!strlen(mod))
1858                 return -EINVAL;
1859
1860         if (ftrace_match_module_records(func, mod, enable))
1861                 return 0;
1862         return -EINVAL;
1863 }
1864
1865 static struct ftrace_func_command ftrace_mod_cmd = {
1866         .name                   = "mod",
1867         .func                   = ftrace_mod_callback,
1868 };
1869
1870 static int __init ftrace_mod_cmd_init(void)
1871 {
1872         return register_ftrace_command(&ftrace_mod_cmd);
1873 }
1874 device_initcall(ftrace_mod_cmd_init);
1875
1876 static void
1877 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1878 {
1879         struct ftrace_func_probe *entry;
1880         struct hlist_head *hhd;
1881         struct hlist_node *n;
1882         unsigned long key;
1883         int resched;
1884
1885         key = hash_long(ip, FTRACE_HASH_BITS);
1886
1887         hhd = &ftrace_func_hash[key];
1888
1889         if (hlist_empty(hhd))
1890                 return;
1891
1892         /*
1893          * Disable preemption for these calls to prevent a RCU grace
1894          * period. This syncs the hash iteration and freeing of items
1895          * on the hash. rcu_read_lock is too dangerous here.
1896          */
1897         resched = ftrace_preempt_disable();
1898         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1899                 if (entry->ip == ip)
1900                         entry->ops->func(ip, parent_ip, &entry->data);
1901         }
1902         ftrace_preempt_enable(resched);
1903 }
1904
1905 static struct ftrace_ops trace_probe_ops __read_mostly =
1906 {
1907         .func           = function_trace_probe_call,
1908 };
1909
1910 static int ftrace_probe_registered;
1911
1912 static void __enable_ftrace_function_probe(void)
1913 {
1914         int i;
1915
1916         if (ftrace_probe_registered)
1917                 return;
1918
1919         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1920                 struct hlist_head *hhd = &ftrace_func_hash[i];
1921                 if (hhd->first)
1922                         break;
1923         }
1924         /* Nothing registered? */
1925         if (i == FTRACE_FUNC_HASHSIZE)
1926                 return;
1927
1928         __register_ftrace_function(&trace_probe_ops);
1929         ftrace_startup(0);
1930         ftrace_probe_registered = 1;
1931 }
1932
1933 static void __disable_ftrace_function_probe(void)
1934 {
1935         int i;
1936
1937         if (!ftrace_probe_registered)
1938                 return;
1939
1940         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1941                 struct hlist_head *hhd = &ftrace_func_hash[i];
1942                 if (hhd->first)
1943                         return;
1944         }
1945
1946         /* no more funcs left */
1947         __unregister_ftrace_function(&trace_probe_ops);
1948         ftrace_shutdown(0);
1949         ftrace_probe_registered = 0;
1950 }
1951
1952
1953 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1954 {
1955         struct ftrace_func_probe *entry =
1956                 container_of(rhp, struct ftrace_func_probe, rcu);
1957
1958         if (entry->ops->free)
1959                 entry->ops->free(&entry->data);
1960         kfree(entry);
1961 }
1962
1963
1964 int
1965 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1966                               void *data)
1967 {
1968         struct ftrace_func_probe *entry;
1969         struct ftrace_page *pg;
1970         struct dyn_ftrace *rec;
1971         int type, len, not;
1972         unsigned long key;
1973         int count = 0;
1974         char *search;
1975
1976         type = filter_parse_regex(glob, strlen(glob), &search, &not);
1977         len = strlen(search);
1978
1979         /* we do not support '!' for function probes */
1980         if (WARN_ON(not))
1981                 return -EINVAL;
1982
1983         mutex_lock(&ftrace_lock);
1984         do_for_each_ftrace_rec(pg, rec) {
1985
1986                 if (rec->flags & FTRACE_FL_FAILED)
1987                         continue;
1988
1989                 if (!ftrace_match_record(rec, search, len, type))
1990                         continue;
1991
1992                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1993                 if (!entry) {
1994                         /* If we did not process any, then return error */
1995                         if (!count)
1996                                 count = -ENOMEM;
1997                         goto out_unlock;
1998                 }
1999
2000                 count++;
2001
2002                 entry->data = data;
2003
2004                 /*
2005                  * The caller might want to do something special
2006                  * for each function we find. We call the callback
2007                  * to give the caller an opportunity to do so.
2008                  */
2009                 if (ops->callback) {
2010                         if (ops->callback(rec->ip, &entry->data) < 0) {
2011                                 /* caller does not like this func */
2012                                 kfree(entry);
2013                                 continue;
2014                         }
2015                 }
2016
2017                 entry->ops = ops;
2018                 entry->ip = rec->ip;
2019
2020                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2021                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2022
2023         } while_for_each_ftrace_rec();
2024         __enable_ftrace_function_probe();
2025
2026  out_unlock:
2027         mutex_unlock(&ftrace_lock);
2028
2029         return count;
2030 }
2031
2032 enum {
2033         PROBE_TEST_FUNC         = 1,
2034         PROBE_TEST_DATA         = 2
2035 };
2036
2037 static void
2038 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2039                                   void *data, int flags)
2040 {
2041         struct ftrace_func_probe *entry;
2042         struct hlist_node *n, *tmp;
2043         char str[KSYM_SYMBOL_LEN];
2044         int type = MATCH_FULL;
2045         int i, len = 0;
2046         char *search;
2047
2048         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2049                 glob = NULL;
2050         else if (glob) {
2051                 int not;
2052
2053                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2054                 len = strlen(search);
2055
2056                 /* we do not support '!' for function probes */
2057                 if (WARN_ON(not))
2058                         return;
2059         }
2060
2061         mutex_lock(&ftrace_lock);
2062         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2063                 struct hlist_head *hhd = &ftrace_func_hash[i];
2064
2065                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2066
2067                         /* break up if statements for readability */
2068                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2069                                 continue;
2070
2071                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2072                                 continue;
2073
2074                         /* do this last, since it is the most expensive */
2075                         if (glob) {
2076                                 kallsyms_lookup(entry->ip, NULL, NULL,
2077                                                 NULL, str);
2078                                 if (!ftrace_match(str, glob, len, type))
2079                                         continue;
2080                         }
2081
2082                         hlist_del(&entry->node);
2083                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2084                 }
2085         }
2086         __disable_ftrace_function_probe();
2087         mutex_unlock(&ftrace_lock);
2088 }
2089
2090 void
2091 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2092                                 void *data)
2093 {
2094         __unregister_ftrace_function_probe(glob, ops, data,
2095                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2096 }
2097
2098 void
2099 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2100 {
2101         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2102 }
2103
2104 void unregister_ftrace_function_probe_all(char *glob)
2105 {
2106         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2107 }
2108
2109 static LIST_HEAD(ftrace_commands);
2110 static DEFINE_MUTEX(ftrace_cmd_mutex);
2111
2112 int register_ftrace_command(struct ftrace_func_command *cmd)
2113 {
2114         struct ftrace_func_command *p;
2115         int ret = 0;
2116
2117         mutex_lock(&ftrace_cmd_mutex);
2118         list_for_each_entry(p, &ftrace_commands, list) {
2119                 if (strcmp(cmd->name, p->name) == 0) {
2120                         ret = -EBUSY;
2121                         goto out_unlock;
2122                 }
2123         }
2124         list_add(&cmd->list, &ftrace_commands);
2125  out_unlock:
2126         mutex_unlock(&ftrace_cmd_mutex);
2127
2128         return ret;
2129 }
2130
2131 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2132 {
2133         struct ftrace_func_command *p, *n;
2134         int ret = -ENODEV;
2135
2136         mutex_lock(&ftrace_cmd_mutex);
2137         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2138                 if (strcmp(cmd->name, p->name) == 0) {
2139                         ret = 0;
2140                         list_del_init(&p->list);
2141                         goto out_unlock;
2142                 }
2143         }
2144  out_unlock:
2145         mutex_unlock(&ftrace_cmd_mutex);
2146
2147         return ret;
2148 }
2149
2150 static int ftrace_process_regex(char *buff, int len, int enable)
2151 {
2152         char *func, *command, *next = buff;
2153         struct ftrace_func_command *p;
2154         int ret = -EINVAL;
2155
2156         func = strsep(&next, ":");
2157
2158         if (!next) {
2159                 if (ftrace_match_records(func, len, enable))
2160                         return 0;
2161                 return ret;
2162         }
2163
2164         /* command found */
2165
2166         command = strsep(&next, ":");
2167
2168         mutex_lock(&ftrace_cmd_mutex);
2169         list_for_each_entry(p, &ftrace_commands, list) {
2170                 if (strcmp(p->name, command) == 0) {
2171                         ret = p->func(func, command, next, enable);
2172                         goto out_unlock;
2173                 }
2174         }
2175  out_unlock:
2176         mutex_unlock(&ftrace_cmd_mutex);
2177
2178         return ret;
2179 }
2180
2181 static ssize_t
2182 ftrace_regex_write(struct file *file, const char __user *ubuf,
2183                    size_t cnt, loff_t *ppos, int enable)
2184 {
2185         struct ftrace_iterator *iter;
2186         struct trace_parser *parser;
2187         ssize_t ret, read;
2188
2189         if (!cnt)
2190                 return 0;
2191
2192         mutex_lock(&ftrace_regex_lock);
2193
2194         if (file->f_mode & FMODE_READ) {
2195                 struct seq_file *m = file->private_data;
2196                 iter = m->private;
2197         } else
2198                 iter = file->private_data;
2199
2200         parser = &iter->parser;
2201         read = trace_get_user(parser, ubuf, cnt, ppos);
2202
2203         if (read >= 0 && trace_parser_loaded(parser) &&
2204             !trace_parser_cont(parser)) {
2205                 ret = ftrace_process_regex(parser->buffer,
2206                                            parser->idx, enable);
2207                 trace_parser_clear(parser);
2208                 if (ret)
2209                         goto out_unlock;
2210         }
2211
2212         ret = read;
2213 out_unlock:
2214         mutex_unlock(&ftrace_regex_lock);
2215
2216         return ret;
2217 }
2218
2219 static ssize_t
2220 ftrace_filter_write(struct file *file, const char __user *ubuf,
2221                     size_t cnt, loff_t *ppos)
2222 {
2223         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2224 }
2225
2226 static ssize_t
2227 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2228                      size_t cnt, loff_t *ppos)
2229 {
2230         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2231 }
2232
2233 static void
2234 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2235 {
2236         if (unlikely(ftrace_disabled))
2237                 return;
2238
2239         mutex_lock(&ftrace_regex_lock);
2240         if (reset)
2241                 ftrace_filter_reset(enable);
2242         if (buf)
2243                 ftrace_match_records(buf, len, enable);
2244         mutex_unlock(&ftrace_regex_lock);
2245 }
2246
2247 /**
2248  * ftrace_set_filter - set a function to filter on in ftrace
2249  * @buf - the string that holds the function filter text.
2250  * @len - the length of the string.
2251  * @reset - non zero to reset all filters before applying this filter.
2252  *
2253  * Filters denote which functions should be enabled when tracing is enabled.
2254  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2255  */
2256 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2257 {
2258         ftrace_set_regex(buf, len, reset, 1);
2259 }
2260
2261 /**
2262  * ftrace_set_notrace - set a function to not trace in ftrace
2263  * @buf - the string that holds the function notrace text.
2264  * @len - the length of the string.
2265  * @reset - non zero to reset all filters before applying this filter.
2266  *
2267  * Notrace Filters denote which functions should not be enabled when tracing
2268  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2269  * for tracing.
2270  */
2271 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2272 {
2273         ftrace_set_regex(buf, len, reset, 0);
2274 }
2275
2276 /*
2277  * command line interface to allow users to set filters on boot up.
2278  */
2279 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2280 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2281 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2282
2283 static int __init set_ftrace_notrace(char *str)
2284 {
2285         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2286         return 1;
2287 }
2288 __setup("ftrace_notrace=", set_ftrace_notrace);
2289
2290 static int __init set_ftrace_filter(char *str)
2291 {
2292         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2293         return 1;
2294 }
2295 __setup("ftrace_filter=", set_ftrace_filter);
2296
2297 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2298 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2299 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2300
2301 static int __init set_graph_function(char *str)
2302 {
2303         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2304         return 1;
2305 }
2306 __setup("ftrace_graph_filter=", set_graph_function);
2307
2308 static void __init set_ftrace_early_graph(char *buf)
2309 {
2310         int ret;
2311         char *func;
2312
2313         while (buf) {
2314                 func = strsep(&buf, ",");
2315                 /* we allow only one expression at a time */
2316                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2317                                       func);
2318                 if (ret)
2319                         printk(KERN_DEBUG "ftrace: function %s not "
2320                                           "traceable\n", func);
2321         }
2322 }
2323 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2324
2325 static void __init set_ftrace_early_filter(char *buf, int enable)
2326 {
2327         char *func;
2328
2329         while (buf) {
2330                 func = strsep(&buf, ",");
2331                 ftrace_set_regex(func, strlen(func), 0, enable);
2332         }
2333 }
2334
2335 static void __init set_ftrace_early_filters(void)
2336 {
2337         if (ftrace_filter_buf[0])
2338                 set_ftrace_early_filter(ftrace_filter_buf, 1);
2339         if (ftrace_notrace_buf[0])
2340                 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2341 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2342         if (ftrace_graph_buf[0])
2343                 set_ftrace_early_graph(ftrace_graph_buf);
2344 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2345 }
2346
2347 static int
2348 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2349 {
2350         struct seq_file *m = (struct seq_file *)file->private_data;
2351         struct ftrace_iterator *iter;
2352         struct trace_parser *parser;
2353
2354         mutex_lock(&ftrace_regex_lock);
2355         if (file->f_mode & FMODE_READ) {
2356                 iter = m->private;
2357
2358                 seq_release(inode, file);
2359         } else
2360                 iter = file->private_data;
2361
2362         parser = &iter->parser;
2363         if (trace_parser_loaded(parser)) {
2364                 parser->buffer[parser->idx] = 0;
2365                 ftrace_match_records(parser->buffer, parser->idx, enable);
2366         }
2367
2368         mutex_lock(&ftrace_lock);
2369         if (ftrace_start_up && ftrace_enabled)
2370                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2371         mutex_unlock(&ftrace_lock);
2372
2373         trace_parser_put(parser);
2374         kfree(iter);
2375
2376         mutex_unlock(&ftrace_regex_lock);
2377         return 0;
2378 }
2379
2380 static int
2381 ftrace_filter_release(struct inode *inode, struct file *file)
2382 {
2383         return ftrace_regex_release(inode, file, 1);
2384 }
2385
2386 static int
2387 ftrace_notrace_release(struct inode *inode, struct file *file)
2388 {
2389         return ftrace_regex_release(inode, file, 0);
2390 }
2391
2392 static const struct file_operations ftrace_avail_fops = {
2393         .open = ftrace_avail_open,
2394         .read = seq_read,
2395         .llseek = seq_lseek,
2396         .release = seq_release_private,
2397 };
2398
2399 static const struct file_operations ftrace_failures_fops = {
2400         .open = ftrace_failures_open,
2401         .read = seq_read,
2402         .llseek = seq_lseek,
2403         .release = seq_release_private,
2404 };
2405
2406 static const struct file_operations ftrace_filter_fops = {
2407         .open = ftrace_filter_open,
2408         .read = seq_read,
2409         .write = ftrace_filter_write,
2410         .llseek = ftrace_regex_lseek,
2411         .release = ftrace_filter_release,
2412 };
2413
2414 static const struct file_operations ftrace_notrace_fops = {
2415         .open = ftrace_notrace_open,
2416         .read = seq_read,
2417         .write = ftrace_notrace_write,
2418         .llseek = ftrace_regex_lseek,
2419         .release = ftrace_notrace_release,
2420 };
2421
2422 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2423
2424 static DEFINE_MUTEX(graph_lock);
2425
2426 int ftrace_graph_count;
2427 int ftrace_graph_filter_enabled;
2428 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2429
2430 static void *
2431 __g_next(struct seq_file *m, loff_t *pos)
2432 {
2433         if (*pos >= ftrace_graph_count)
2434                 return NULL;
2435         return &ftrace_graph_funcs[*pos];
2436 }
2437
2438 static void *
2439 g_next(struct seq_file *m, void *v, loff_t *pos)
2440 {
2441         (*pos)++;
2442         return __g_next(m, pos);
2443 }
2444
2445 static void *g_start(struct seq_file *m, loff_t *pos)
2446 {
2447         mutex_lock(&graph_lock);
2448
2449         /* Nothing, tell g_show to print all functions are enabled */
2450         if (!ftrace_graph_filter_enabled && !*pos)
2451                 return (void *)1;
2452
2453         return __g_next(m, pos);
2454 }
2455
2456 static void g_stop(struct seq_file *m, void *p)
2457 {
2458         mutex_unlock(&graph_lock);
2459 }
2460
2461 static int g_show(struct seq_file *m, void *v)
2462 {
2463         unsigned long *ptr = v;
2464
2465         if (!ptr)
2466                 return 0;
2467
2468         if (ptr == (unsigned long *)1) {
2469                 seq_printf(m, "#### all functions enabled ####\n");
2470                 return 0;
2471         }
2472
2473         seq_printf(m, "%ps\n", (void *)*ptr);
2474
2475         return 0;
2476 }
2477
2478 static const struct seq_operations ftrace_graph_seq_ops = {
2479         .start = g_start,
2480         .next = g_next,
2481         .stop = g_stop,
2482         .show = g_show,
2483 };
2484
2485 static int
2486 ftrace_graph_open(struct inode *inode, struct file *file)
2487 {
2488         int ret = 0;
2489
2490         if (unlikely(ftrace_disabled))
2491                 return -ENODEV;
2492
2493         mutex_lock(&graph_lock);
2494         if ((file->f_mode & FMODE_WRITE) &&
2495             (file->f_flags & O_TRUNC)) {
2496                 ftrace_graph_filter_enabled = 0;
2497                 ftrace_graph_count = 0;
2498                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2499         }
2500         mutex_unlock(&graph_lock);
2501
2502         if (file->f_mode & FMODE_READ)
2503                 ret = seq_open(file, &ftrace_graph_seq_ops);
2504
2505         return ret;
2506 }
2507
2508 static int
2509 ftrace_graph_release(struct inode *inode, struct file *file)
2510 {
2511         if (file->f_mode & FMODE_READ)
2512                 seq_release(inode, file);
2513         return 0;
2514 }
2515
2516 static int
2517 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2518 {
2519         struct dyn_ftrace *rec;
2520         struct ftrace_page *pg;
2521         int search_len;
2522         int fail = 1;
2523         int type, not;
2524         char *search;
2525         bool exists;
2526         int i;
2527
2528         if (ftrace_disabled)
2529                 return -ENODEV;
2530
2531         /* decode regex */
2532         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2533         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
2534                 return -EBUSY;
2535
2536         search_len = strlen(search);
2537
2538         mutex_lock(&ftrace_lock);
2539         do_for_each_ftrace_rec(pg, rec) {
2540
2541                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2542                         continue;
2543
2544                 if (ftrace_match_record(rec, search, search_len, type)) {
2545                         /* if it is in the array */
2546                         exists = false;
2547                         for (i = 0; i < *idx; i++) {
2548                                 if (array[i] == rec->ip) {
2549                                         exists = true;
2550                                         break;
2551                                 }
2552                         }
2553
2554                         if (!not) {
2555                                 fail = 0;
2556                                 if (!exists) {
2557                                         array[(*idx)++] = rec->ip;
2558                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2559                                                 goto out;
2560                                 }
2561                         } else {
2562                                 if (exists) {
2563                                         array[i] = array[--(*idx)];
2564                                         array[*idx] = 0;
2565                                         fail = 0;
2566                                 }
2567                         }
2568                 }
2569         } while_for_each_ftrace_rec();
2570 out:
2571         mutex_unlock(&ftrace_lock);
2572
2573         if (fail)
2574                 return -EINVAL;
2575
2576         ftrace_graph_filter_enabled = 1;
2577         return 0;
2578 }
2579
2580 static ssize_t
2581 ftrace_graph_write(struct file *file, const char __user *ubuf,
2582                    size_t cnt, loff_t *ppos)
2583 {
2584         struct trace_parser parser;
2585         ssize_t read, ret;
2586
2587         if (!cnt)
2588                 return 0;
2589
2590         mutex_lock(&graph_lock);
2591
2592         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2593                 ret = -ENOMEM;
2594                 goto out_unlock;
2595         }
2596
2597         read = trace_get_user(&parser, ubuf, cnt, ppos);
2598
2599         if (read >= 0 && trace_parser_loaded((&parser))) {
2600                 parser.buffer[parser.idx] = 0;
2601
2602                 /* we allow only one expression at a time */
2603                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2604                                         parser.buffer);
2605                 if (ret)
2606                         goto out_free;
2607         }
2608
2609         ret = read;
2610
2611 out_free:
2612         trace_parser_put(&parser);
2613 out_unlock:
2614         mutex_unlock(&graph_lock);
2615
2616         return ret;
2617 }
2618
2619 static const struct file_operations ftrace_graph_fops = {
2620         .open           = ftrace_graph_open,
2621         .read           = seq_read,
2622         .write          = ftrace_graph_write,
2623         .release        = ftrace_graph_release,
2624 };
2625 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2626
2627 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2628 {
2629
2630         trace_create_file("available_filter_functions", 0444,
2631                         d_tracer, NULL, &ftrace_avail_fops);
2632
2633         trace_create_file("failures", 0444,
2634                         d_tracer, NULL, &ftrace_failures_fops);
2635
2636         trace_create_file("set_ftrace_filter", 0644, d_tracer,
2637                         NULL, &ftrace_filter_fops);
2638
2639         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2640                                     NULL, &ftrace_notrace_fops);
2641
2642 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2643         trace_create_file("set_graph_function", 0444, d_tracer,
2644                                     NULL,
2645                                     &ftrace_graph_fops);
2646 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2647
2648         return 0;
2649 }
2650
2651 static int ftrace_process_locs(struct module *mod,
2652                                unsigned long *start,
2653                                unsigned long *end)
2654 {
2655         unsigned long *p;
2656         unsigned long addr;
2657         unsigned long flags;
2658
2659         mutex_lock(&ftrace_lock);
2660         p = start;
2661         while (p < end) {
2662                 addr = ftrace_call_adjust(*p++);
2663                 /*
2664                  * Some architecture linkers will pad between
2665                  * the different mcount_loc sections of different
2666                  * object files to satisfy alignments.
2667                  * Skip any NULL pointers.
2668                  */
2669                 if (!addr)
2670                         continue;
2671                 ftrace_record_ip(addr);
2672         }
2673
2674         /* disable interrupts to prevent kstop machine */
2675         local_irq_save(flags);
2676         ftrace_update_code(mod);
2677         local_irq_restore(flags);
2678         mutex_unlock(&ftrace_lock);
2679
2680         return 0;
2681 }
2682
2683 #ifdef CONFIG_MODULES
2684 void ftrace_release_mod(struct module *mod)
2685 {
2686         struct dyn_ftrace *rec;
2687         struct ftrace_page *pg;
2688
2689         if (ftrace_disabled)
2690                 return;
2691
2692         mutex_lock(&ftrace_lock);
2693         do_for_each_ftrace_rec(pg, rec) {
2694                 if (within_module_core(rec->ip, mod)) {
2695                         /*
2696                          * rec->ip is changed in ftrace_free_rec()
2697                          * It should not between s and e if record was freed.
2698                          */
2699                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2700                         ftrace_free_rec(rec);
2701                 }
2702         } while_for_each_ftrace_rec();
2703         mutex_unlock(&ftrace_lock);
2704 }
2705
2706 static void ftrace_init_module(struct module *mod,
2707                                unsigned long *start, unsigned long *end)
2708 {
2709         if (ftrace_disabled || start == end)
2710                 return;
2711         ftrace_process_locs(mod, start, end);
2712 }
2713
2714 static int ftrace_module_notify(struct notifier_block *self,
2715                                 unsigned long val, void *data)
2716 {
2717         struct module *mod = data;
2718
2719         switch (val) {
2720         case MODULE_STATE_COMING:
2721                 ftrace_init_module(mod, mod->ftrace_callsites,
2722                                    mod->ftrace_callsites +
2723                                    mod->num_ftrace_callsites);
2724                 break;
2725         case MODULE_STATE_GOING:
2726                 ftrace_release_mod(mod);
2727                 break;
2728         }
2729
2730         return 0;
2731 }
2732 #else
2733 static int ftrace_module_notify(struct notifier_block *self,
2734                                 unsigned long val, void *data)
2735 {
2736         return 0;
2737 }
2738 #endif /* CONFIG_MODULES */
2739
2740 struct notifier_block ftrace_module_nb = {
2741         .notifier_call = ftrace_module_notify,
2742         .priority = 0,
2743 };
2744
2745 extern unsigned long __start_mcount_loc[];
2746 extern unsigned long __stop_mcount_loc[];
2747
2748 void __init ftrace_init(void)
2749 {
2750         unsigned long count, addr, flags;
2751         int ret;
2752
2753         /* Keep the ftrace pointer to the stub */
2754         addr = (unsigned long)ftrace_stub;
2755
2756         local_irq_save(flags);
2757         ftrace_dyn_arch_init(&addr);
2758         local_irq_restore(flags);
2759
2760         /* ftrace_dyn_arch_init places the return code in addr */
2761         if (addr)
2762                 goto failed;
2763
2764         count = __stop_mcount_loc - __start_mcount_loc;
2765
2766         ret = ftrace_dyn_table_alloc(count);
2767         if (ret)
2768                 goto failed;
2769
2770         last_ftrace_enabled = ftrace_enabled = 1;
2771
2772         ret = ftrace_process_locs(NULL,
2773                                   __start_mcount_loc,
2774                                   __stop_mcount_loc);
2775
2776         ret = register_module_notifier(&ftrace_module_nb);
2777         if (ret)
2778                 pr_warning("Failed to register trace ftrace module notifier\n");
2779
2780         set_ftrace_early_filters();
2781
2782         return;
2783  failed:
2784         ftrace_disabled = 1;
2785 }
2786
2787 #else
2788
2789 static int __init ftrace_nodyn_init(void)
2790 {
2791         ftrace_enabled = 1;
2792         return 0;
2793 }
2794 device_initcall(ftrace_nodyn_init);
2795
2796 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2797 static inline void ftrace_startup_enable(int command) { }
2798 /* Keep as macros so we do not need to define the commands */
2799 # define ftrace_startup(command)        do { } while (0)
2800 # define ftrace_shutdown(command)       do { } while (0)
2801 # define ftrace_startup_sysctl()        do { } while (0)
2802 # define ftrace_shutdown_sysctl()       do { } while (0)
2803 #endif /* CONFIG_DYNAMIC_FTRACE */
2804
2805 static void clear_ftrace_swapper(void)
2806 {
2807         struct task_struct *p;
2808         int cpu;
2809
2810         get_online_cpus();
2811         for_each_online_cpu(cpu) {
2812                 p = idle_task(cpu);
2813                 clear_tsk_trace_trace(p);
2814         }
2815         put_online_cpus();
2816 }
2817
2818 static void set_ftrace_swapper(void)
2819 {
2820         struct task_struct *p;
2821         int cpu;
2822
2823         get_online_cpus();
2824         for_each_online_cpu(cpu) {
2825                 p = idle_task(cpu);
2826                 set_tsk_trace_trace(p);
2827         }
2828         put_online_cpus();
2829 }
2830
2831 static void clear_ftrace_pid(struct pid *pid)
2832 {
2833         struct task_struct *p;
2834
2835         rcu_read_lock();
2836         do_each_pid_task(pid, PIDTYPE_PID, p) {
2837                 clear_tsk_trace_trace(p);
2838         } while_each_pid_task(pid, PIDTYPE_PID, p);
2839         rcu_read_unlock();
2840
2841         put_pid(pid);
2842 }
2843
2844 static void set_ftrace_pid(struct pid *pid)
2845 {
2846         struct task_struct *p;
2847
2848         rcu_read_lock();
2849         do_each_pid_task(pid, PIDTYPE_PID, p) {
2850                 set_tsk_trace_trace(p);
2851         } while_each_pid_task(pid, PIDTYPE_PID, p);
2852         rcu_read_unlock();
2853 }
2854
2855 static void clear_ftrace_pid_task(struct pid *pid)
2856 {
2857         if (pid == ftrace_swapper_pid)
2858                 clear_ftrace_swapper();
2859         else
2860                 clear_ftrace_pid(pid);
2861 }
2862
2863 static void set_ftrace_pid_task(struct pid *pid)
2864 {
2865         if (pid == ftrace_swapper_pid)
2866                 set_ftrace_swapper();
2867         else
2868                 set_ftrace_pid(pid);
2869 }
2870
2871 static int ftrace_pid_add(int p)
2872 {
2873         struct pid *pid;
2874         struct ftrace_pid *fpid;
2875         int ret = -EINVAL;
2876
2877         mutex_lock(&ftrace_lock);
2878
2879         if (!p)
2880                 pid = ftrace_swapper_pid;
2881         else
2882                 pid = find_get_pid(p);
2883
2884         if (!pid)
2885                 goto out;
2886
2887         ret = 0;
2888
2889         list_for_each_entry(fpid, &ftrace_pids, list)
2890                 if (fpid->pid == pid)
2891                         goto out_put;
2892
2893         ret = -ENOMEM;
2894
2895         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2896         if (!fpid)
2897                 goto out_put;
2898
2899         list_add(&fpid->list, &ftrace_pids);
2900         fpid->pid = pid;
2901
2902         set_ftrace_pid_task(pid);
2903
2904         ftrace_update_pid_func();
2905         ftrace_startup_enable(0);
2906
2907         mutex_unlock(&ftrace_lock);
2908         return 0;
2909
2910 out_put:
2911         if (pid != ftrace_swapper_pid)
2912                 put_pid(pid);
2913
2914 out:
2915         mutex_unlock(&ftrace_lock);
2916         return ret;
2917 }
2918
2919 static void ftrace_pid_reset(void)
2920 {
2921         struct ftrace_pid *fpid, *safe;
2922
2923         mutex_lock(&ftrace_lock);
2924         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2925                 struct pid *pid = fpid->pid;
2926
2927                 clear_ftrace_pid_task(pid);
2928
2929                 list_del(&fpid->list);
2930                 kfree(fpid);
2931         }
2932
2933         ftrace_update_pid_func();
2934         ftrace_startup_enable(0);
2935
2936         mutex_unlock(&ftrace_lock);
2937 }
2938
2939 static void *fpid_start(struct seq_file *m, loff_t *pos)
2940 {
2941         mutex_lock(&ftrace_lock);
2942
2943         if (list_empty(&ftrace_pids) && (!*pos))
2944                 return (void *) 1;
2945
2946         return seq_list_start(&ftrace_pids, *pos);
2947 }
2948
2949 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2950 {
2951         if (v == (void *)1)
2952                 return NULL;
2953
2954         return seq_list_next(v, &ftrace_pids, pos);
2955 }
2956
2957 static void fpid_stop(struct seq_file *m, void *p)
2958 {
2959         mutex_unlock(&ftrace_lock);
2960 }
2961
2962 static int fpid_show(struct seq_file *m, void *v)
2963 {
2964         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2965
2966         if (v == (void *)1) {
2967                 seq_printf(m, "no pid\n");
2968                 return 0;
2969         }
2970
2971         if (fpid->pid == ftrace_swapper_pid)
2972                 seq_printf(m, "swapper tasks\n");
2973         else
2974                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2975
2976         return 0;
2977 }
2978
2979 static const struct seq_operations ftrace_pid_sops = {
2980         .start = fpid_start,
2981         .next = fpid_next,
2982         .stop = fpid_stop,
2983         .show = fpid_show,
2984 };
2985
2986 static int
2987 ftrace_pid_open(struct inode *inode, struct file *file)
2988 {
2989         int ret = 0;
2990
2991         if ((file->f_mode & FMODE_WRITE) &&
2992             (file->f_flags & O_TRUNC))
2993                 ftrace_pid_reset();
2994
2995         if (file->f_mode & FMODE_READ)
2996                 ret = seq_open(file, &ftrace_pid_sops);
2997
2998         return ret;
2999 }
3000
3001 static ssize_t
3002 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3003                    size_t cnt, loff_t *ppos)
3004 {
3005         char buf[64], *tmp;
3006         long val;
3007         int ret;
3008
3009         if (cnt >= sizeof(buf))
3010                 return -EINVAL;
3011
3012         if (copy_from_user(&buf, ubuf, cnt))
3013                 return -EFAULT;
3014
3015         buf[cnt] = 0;
3016
3017         /*
3018          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3019          * to clean the filter quietly.
3020          */
3021         tmp = strstrip(buf);
3022         if (strlen(tmp) == 0)
3023                 return 1;
3024
3025         ret = strict_strtol(tmp, 10, &val);
3026         if (ret < 0)
3027                 return ret;
3028
3029         ret = ftrace_pid_add(val);
3030
3031         return ret ? ret : cnt;
3032 }
3033
3034 static int
3035 ftrace_pid_release(struct inode *inode, struct file *file)
3036 {
3037         if (file->f_mode & FMODE_READ)
3038                 seq_release(inode, file);
3039
3040         return 0;
3041 }
3042
3043 static const struct file_operations ftrace_pid_fops = {
3044         .open           = ftrace_pid_open,
3045         .write          = ftrace_pid_write,
3046         .read           = seq_read,
3047         .llseek         = seq_lseek,
3048         .release        = ftrace_pid_release,
3049 };
3050
3051 static __init int ftrace_init_debugfs(void)
3052 {
3053         struct dentry *d_tracer;
3054
3055         d_tracer = tracing_init_dentry();
3056         if (!d_tracer)
3057                 return 0;
3058
3059         ftrace_init_dyn_debugfs(d_tracer);
3060
3061         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3062                             NULL, &ftrace_pid_fops);
3063
3064         ftrace_profile_debugfs(d_tracer);
3065
3066         return 0;
3067 }
3068 fs_initcall(ftrace_init_debugfs);
3069
3070 /**
3071  * ftrace_kill - kill ftrace
3072  *
3073  * This function should be used by panic code. It stops ftrace
3074  * but in a not so nice way. If you need to simply kill ftrace
3075  * from a non-atomic section, use ftrace_kill.
3076  */
3077 void ftrace_kill(void)
3078 {
3079         ftrace_disabled = 1;
3080         ftrace_enabled = 0;
3081         clear_ftrace_function();
3082 }
3083
3084 /**
3085  * register_ftrace_function - register a function for profiling
3086  * @ops - ops structure that holds the function for profiling.
3087  *
3088  * Register a function to be called by all functions in the
3089  * kernel.
3090  *
3091  * Note: @ops->func and all the functions it calls must be labeled
3092  *       with "notrace", otherwise it will go into a
3093  *       recursive loop.
3094  */
3095 int register_ftrace_function(struct ftrace_ops *ops)
3096 {
3097         int ret;
3098
3099         if (unlikely(ftrace_disabled))
3100                 return -1;
3101
3102         mutex_lock(&ftrace_lock);
3103
3104         ret = __register_ftrace_function(ops);
3105         ftrace_startup(0);
3106
3107         mutex_unlock(&ftrace_lock);
3108         return ret;
3109 }
3110
3111 /**
3112  * unregister_ftrace_function - unregister a function for profiling.
3113  * @ops - ops structure that holds the function to unregister
3114  *
3115  * Unregister a function that was added to be called by ftrace profiling.
3116  */
3117 int unregister_ftrace_function(struct ftrace_ops *ops)
3118 {
3119         int ret;
3120
3121         mutex_lock(&ftrace_lock);
3122         ret = __unregister_ftrace_function(ops);
3123         ftrace_shutdown(0);
3124         mutex_unlock(&ftrace_lock);
3125
3126         return ret;
3127 }
3128
3129 int
3130 ftrace_enable_sysctl(struct ctl_table *table, int write,
3131                      void __user *buffer, size_t *lenp,
3132                      loff_t *ppos)
3133 {
3134         int ret;
3135
3136         if (unlikely(ftrace_disabled))
3137                 return -ENODEV;
3138
3139         mutex_lock(&ftrace_lock);
3140
3141         ret  = proc_dointvec(table, write, buffer, lenp, ppos);
3142
3143         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3144                 goto out;
3145
3146         last_ftrace_enabled = !!ftrace_enabled;
3147
3148         if (ftrace_enabled) {
3149
3150                 ftrace_startup_sysctl();
3151
3152                 /* we are starting ftrace again */
3153                 if (ftrace_list != &ftrace_list_end) {
3154                         if (ftrace_list->next == &ftrace_list_end)
3155                                 ftrace_trace_function = ftrace_list->func;
3156                         else
3157                                 ftrace_trace_function = ftrace_list_func;
3158                 }
3159
3160         } else {
3161                 /* stopping ftrace calls (just send to ftrace_stub) */
3162                 ftrace_trace_function = ftrace_stub;
3163
3164                 ftrace_shutdown_sysctl();
3165         }
3166
3167  out:
3168         mutex_unlock(&ftrace_lock);
3169         return ret;
3170 }
3171
3172 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3173
3174 static int ftrace_graph_active;
3175 static struct notifier_block ftrace_suspend_notifier;
3176
3177 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3178 {
3179         return 0;
3180 }
3181
3182 /* The callbacks that hook a function */
3183 trace_func_graph_ret_t ftrace_graph_return =
3184                         (trace_func_graph_ret_t)ftrace_stub;
3185 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3186
3187 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3188 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3189 {
3190         int i;
3191         int ret = 0;
3192         unsigned long flags;
3193         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3194         struct task_struct *g, *t;
3195
3196         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3197                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3198                                         * sizeof(struct ftrace_ret_stack),
3199                                         GFP_KERNEL);
3200                 if (!ret_stack_list[i]) {
3201                         start = 0;
3202                         end = i;
3203                         ret = -ENOMEM;
3204                         goto free;
3205                 }
3206         }
3207
3208         read_lock_irqsave(&tasklist_lock, flags);
3209         do_each_thread(g, t) {
3210                 if (start == end) {
3211                         ret = -EAGAIN;
3212                         goto unlock;
3213                 }
3214
3215                 if (t->ret_stack == NULL) {
3216                         atomic_set(&t->tracing_graph_pause, 0);
3217                         atomic_set(&t->trace_overrun, 0);
3218                         t->curr_ret_stack = -1;
3219                         /* Make sure the tasks see the -1 first: */
3220                         smp_wmb();
3221                         t->ret_stack = ret_stack_list[start++];
3222                 }
3223         } while_each_thread(g, t);
3224
3225 unlock:
3226         read_unlock_irqrestore(&tasklist_lock, flags);
3227 free:
3228         for (i = start; i < end; i++)
3229                 kfree(ret_stack_list[i]);
3230         return ret;
3231 }
3232
3233 static void
3234 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3235                                 struct task_struct *next)
3236 {
3237         unsigned long long timestamp;
3238         int index;
3239
3240         /*
3241          * Does the user want to count the time a function was asleep.
3242          * If so, do not update the time stamps.
3243          */
3244         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3245                 return;
3246
3247         timestamp = trace_clock_local();
3248
3249         prev->ftrace_timestamp = timestamp;
3250
3251         /* only process tasks that we timestamped */
3252         if (!next->ftrace_timestamp)
3253                 return;
3254
3255         /*
3256          * Update all the counters in next to make up for the
3257          * time next was sleeping.
3258          */
3259         timestamp -= next->ftrace_timestamp;
3260
3261         for (index = next->curr_ret_stack; index >= 0; index--)
3262                 next->ret_stack[index].calltime += timestamp;
3263 }
3264
3265 /* Allocate a return stack for each task */
3266 static int start_graph_tracing(void)
3267 {
3268         struct ftrace_ret_stack **ret_stack_list;
3269         int ret, cpu;
3270
3271         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3272                                 sizeof(struct ftrace_ret_stack *),
3273                                 GFP_KERNEL);
3274
3275         if (!ret_stack_list)
3276                 return -ENOMEM;
3277
3278         /* The cpu_boot init_task->ret_stack will never be freed */
3279         for_each_online_cpu(cpu) {
3280                 if (!idle_task(cpu)->ret_stack)
3281                         ftrace_graph_init_task(idle_task(cpu));
3282         }
3283
3284         do {
3285                 ret = alloc_retstack_tasklist(ret_stack_list);
3286         } while (ret == -EAGAIN);
3287
3288         if (!ret) {
3289                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3290                 if (ret)
3291                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3292                                 " probe to kernel_sched_switch\n");
3293         }
3294
3295         kfree(ret_stack_list);
3296         return ret;
3297 }
3298
3299 /*
3300  * Hibernation protection.
3301  * The state of the current task is too much unstable during
3302  * suspend/restore to disk. We want to protect against that.
3303  */
3304 static int
3305 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3306                                                         void *unused)
3307 {
3308         switch (state) {
3309         case PM_HIBERNATION_PREPARE:
3310                 pause_graph_tracing();
3311                 break;
3312
3313         case PM_POST_HIBERNATION:
3314                 unpause_graph_tracing();
3315                 break;
3316         }
3317         return NOTIFY_DONE;
3318 }
3319
3320 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3321                         trace_func_graph_ent_t entryfunc)
3322 {
3323         int ret = 0;
3324
3325         mutex_lock(&ftrace_lock);
3326
3327         /* we currently allow only one tracer registered at a time */
3328         if (ftrace_graph_active) {
3329                 ret = -EBUSY;
3330                 goto out;
3331         }
3332
3333         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3334         register_pm_notifier(&ftrace_suspend_notifier);
3335
3336         ftrace_graph_active++;
3337         ret = start_graph_tracing();
3338         if (ret) {
3339                 ftrace_graph_active--;
3340                 goto out;
3341         }
3342
3343         ftrace_graph_return = retfunc;
3344         ftrace_graph_entry = entryfunc;
3345
3346         ftrace_startup(FTRACE_START_FUNC_RET);
3347
3348 out:
3349         mutex_unlock(&ftrace_lock);
3350         return ret;
3351 }
3352
3353 void unregister_ftrace_graph(void)
3354 {
3355         mutex_lock(&ftrace_lock);
3356
3357         if (unlikely(!ftrace_graph_active))
3358                 goto out;
3359
3360         ftrace_graph_active--;
3361         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3362         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3363         ftrace_graph_entry = ftrace_graph_entry_stub;
3364         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3365         unregister_pm_notifier(&ftrace_suspend_notifier);
3366
3367  out:
3368         mutex_unlock(&ftrace_lock);
3369 }
3370
3371 /* Allocate a return stack for newly created task */
3372 void ftrace_graph_init_task(struct task_struct *t)
3373 {
3374         /* Make sure we do not use the parent ret_stack */
3375         t->ret_stack = NULL;
3376
3377         if (ftrace_graph_active) {
3378                 struct ftrace_ret_stack *ret_stack;
3379
3380                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3381                                 * sizeof(struct ftrace_ret_stack),
3382                                 GFP_KERNEL);
3383                 if (!ret_stack)
3384                         return;
3385                 t->curr_ret_stack = -1;
3386                 atomic_set(&t->tracing_graph_pause, 0);
3387                 atomic_set(&t->trace_overrun, 0);
3388                 t->ftrace_timestamp = 0;
3389                 /* make curr_ret_stack visable before we add the ret_stack */
3390                 smp_wmb();
3391                 t->ret_stack = ret_stack;
3392         }
3393 }
3394
3395 void ftrace_graph_exit_task(struct task_struct *t)
3396 {
3397         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3398
3399         t->ret_stack = NULL;
3400         /* NULL must become visible to IRQs before we free it: */
3401         barrier();
3402
3403         kfree(ret_stack);
3404 }
3405
3406 void ftrace_graph_stop(void)
3407 {
3408         ftrace_stop();
3409 }
3410 #endif