tracing/branch-tracer: fix a trace recursion on branch tracer
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

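/*
 * If one of these checks trips, something went wrong while modifying
 * kernel text, so ftrace_kill() shuts tracing down hard rather than
 * risk patching code from a corrupted state.
 */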
#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

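/*
 * Used as the trace function whenever more than one ftrace_ops is
 * registered: walk ftrace_list and hand the ip/parent_ip pair to
 * every callback in turn.
 */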
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between setting the stub and
 * the moment every CPU stops calling the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
#else
                if (ops->next == &ftrace_list_end)
                        __ftrace_trace_function = ops->func;
                else
                        __ftrace_trace_function = ftrace_list_func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

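/*
 * Freed records form a simple singly linked free list: a record on the
 * list is dead, so its ip field is reused to point at the next free
 * record, and FTRACE_FL_FREE marks it as reclaimable.
 */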
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

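/*
 * Allocate a dyn_ftrace record: reuse an entry from the free list when
 * one is available, otherwise bump-allocate from the current
 * ftrace_page, chaining in a new zeroed page once it fills up.
 */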
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (!ftrace_enabled || ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        list_add(&rec->list, &ftrace_new_addrs);

        return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int ret;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        ret = ftrace_modify_code(ip, call, nop);
        if (ret) {
                switch (ret) {
                case -EFAULT:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on modifying ");
                        print_ip_sym(ip);
                        break;
                case -EINVAL:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace failed to modify ");
                        print_ip_sym(ip);
                        print_ip_ins(" expected: ", call);
                        print_ip_ins(" actual: ", (unsigned char *)ip);
                        print_ip_ins(" replace: ", nop);
                        printk(KERN_CONT "\n");
                        break;
                case -EPERM:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on writing ");
                        print_ip_sym(ip);
                        break;
                default:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on unknown error ");
                        print_ip_sym(ip);
                }

                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

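/*
 * Run from stop_machine(): every other CPU is quiesced, so the text
 * patching below cannot race with concurrent execution of the code
 * being modified.
 */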
static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

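/*
 * ftrace_start_up is a reference count: the mcount call sites are
 * patched in on the 0->1 transition and patched back out on 1->0,
 * so nested users cannot switch tracing off underneath one another.
 */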
static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        if (ftrace_start_up == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                list_del_init(&p->list);

                /* convert record (i.e, patch mcount-call with NOP) */
                if (ftrace_code_disable(p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

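/*
 * seq_file iterator over every dyn_ftrace record: walk the record
 * pages and skip entries that do not match the iterator flags
 * (free records, failures-only views, filter/notrace-only views).
 */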
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}


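/*
 * Clear the FILTER (or NOTRACE) flag on every record, i.e. reset the
 * corresponding set_ftrace_* file so that it matches everything again.
 */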
static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

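/*
 * A single '*' wildcard is supported in the pattern: "foo*" matches a
 * prefix (MATCH_FRONT_ONLY), "*foo" a suffix (MATCH_END_ONLY),
 * "*foo*" a substring (MATCH_MIDDLE_ONLY), and a bare "foo" must
 * match the symbol name exactly (MATCH_FULL).
 */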
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

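/*
 * Writes to set_ftrace_filter / set_ftrace_notrace are parsed as
 * whitespace-separated patterns.  A pattern may be split across
 * write() calls, so FTRACE_ITER_CONT marks a partially read token
 * that a later write (or the final release) completes.
 */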
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
        if (iter->filtered && ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        return 0;
}

fs_initcall(ftrace_init_debugfs);

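/*
 * Record every mcount call site listed in [start, end) and then
 * convert each call into a nop.  Interrupts are disabled across the
 * update to keep stop_machine from running in the middle of it.
 */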
static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        mutex_lock(&ftrace_start_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                ftrace_record_ip(addr);
        }

        /* disable interrupts to prevent kstop machine */
        local_irq_save(flags);
        ftrace_update_code();
        local_irq_restore(flags);
        mutex_unlock(&ftrace_start_lock);

        return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. It is not meant as a clean way to
 * shut tracing down; for that, unregister the ftrace_ops instead.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
                        (trace_function_return_t)ftrace_stub;
void register_ftrace_return(trace_function_return_t func)
{
        ftrace_function_return = func;
}

void unregister_ftrace_return(void)
{
        ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif