ftrace: trace different functions with a different tracer
kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <asm/ftrace.h>
33
34 #include "trace.h"
35
36 #define FTRACE_WARN_ON(cond)                    \
37         do {                                    \
38                 if (WARN_ON(cond))              \
39                         ftrace_kill();          \
40         } while (0)
41
42 #define FTRACE_WARN_ON_ONCE(cond)               \
43         do {                                    \
44                 if (WARN_ON_ONCE(cond))         \
45                         ftrace_kill();          \
46         } while (0)
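/*
 * Illustrative call site (hypothetical, not in this file): on any
 * anomaly the tracer shuts itself down rather than keep patching
 * call sites it can no longer trust, e.g.:
 *
 *	FTRACE_WARN_ON_ONCE(rec->flags & FTRACE_FL_FREE);
 */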
47
48 /* ftrace_enabled is a method to turn ftrace on or off */
49 int ftrace_enabled __read_mostly;
50 static int last_ftrace_enabled;
51
52 /* set when tracing only a pid */
53 struct pid *ftrace_pid_trace;
54 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
55
56 /* Quick disabling of function tracer. */
57 int function_trace_stop;
58
59 /*
60  * ftrace_disabled is set when an anomaly is discovered.
61  * ftrace_disabled is much stronger than ftrace_enabled.
62  */
63 static int ftrace_disabled __read_mostly;
64
65 static DEFINE_MUTEX(ftrace_lock);
66
67 static struct ftrace_ops ftrace_list_end __read_mostly =
68 {
69         .func = ftrace_stub,
70 };
71
72 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
76
77 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
78 {
79         struct ftrace_ops *op = ftrace_list;
80
81         /* in case someone actually ports this to alpha! */
82         read_barrier_depends();
83
84         while (op != &ftrace_list_end) {
85                 /* silly alpha */
86                 read_barrier_depends();
87                 op->func(ip, parent_ip);
88                 op = op->next;
89         }
90 }
91
92 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93 {
94         if (!test_tsk_trace_trace(current))
95                 return;
96
97         ftrace_pid_function(ip, parent_ip);
98 }
99
100 static void set_ftrace_pid_function(ftrace_func_t func)
101 {
102         /* do not set ftrace_pid_function to itself! */
103         if (func != ftrace_pid_func)
104                 ftrace_pid_function = func;
105 }
106
107 /**
108  * clear_ftrace_function - reset the ftrace function
109  *
110  * This NULLs the ftrace function and in essence stops
111  * tracing.  There may be lag before tracing fully stops on all CPUs.
112  */
113 void clear_ftrace_function(void)
114 {
115         ftrace_trace_function = ftrace_stub;
116         __ftrace_trace_function = ftrace_stub;
117         ftrace_pid_function = ftrace_stub;
118 }
119
120 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121 /*
122  * For those archs that do not test function_trace_stop in their
123  * mcount call site, we need to do it from C.
124  */
125 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126 {
127         if (function_trace_stop)
128                 return;
129
130         __ftrace_trace_function(ip, parent_ip);
131 }
132 #endif
133
134 static int __register_ftrace_function(struct ftrace_ops *ops)
135 {
136         ops->next = ftrace_list;
137         /*
138          * We are entering ops into the ftrace_list but another
139          * CPU might be walking that list. We need to make sure
140          * the ops->next pointer is valid before another CPU sees
141          * the ops pointer included into the ftrace_list.
142          */
143         smp_wmb();
144         ftrace_list = ops;
145
146         if (ftrace_enabled) {
147                 ftrace_func_t func;
148
149                 if (ops->next == &ftrace_list_end)
150                         func = ops->func;
151                 else
152                         func = ftrace_list_func;
153
154                 if (ftrace_pid_trace) {
155                         set_ftrace_pid_function(func);
156                         func = ftrace_pid_func;
157                 }
158
159                 /*
160                  * For one func, simply call it directly.
161                  * For more than one func, call the chain.
162                  */
163 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
164                 ftrace_trace_function = func;
165 #else
166                 __ftrace_trace_function = func;
167                 ftrace_trace_function = ftrace_test_stop_func;
168 #endif
169         }
170
171         return 0;
172 }
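/*
 * Ordering sketch (illustration only): the smp_wmb() above pairs with
 * the read_barrier_depends() in ftrace_list_func():
 *
 *	writer				reader
 *	------				------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * A reader thus never follows a published ftrace_list pointer to an
 * ops whose ->next is still uninitialized.
 */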
173
174 static int __unregister_ftrace_function(struct ftrace_ops *ops)
175 {
176         struct ftrace_ops **p;
177
178         /*
179          * If we are removing the last function, then simply point
180          * to the ftrace_stub.
181          */
182         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
183                 ftrace_trace_function = ftrace_stub;
184                 ftrace_list = &ftrace_list_end;
185                 return 0;
186         }
187
188         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
189                 if (*p == ops)
190                         break;
191
192         if (*p != ops)
193                 return -1;
194
195         *p = (*p)->next;
196
197         if (ftrace_enabled) {
198                 /* If we only have one func left, then call that directly */
199                 if (ftrace_list->next == &ftrace_list_end) {
200                         ftrace_func_t func = ftrace_list->func;
201
202                         if (ftrace_pid_trace) {
203                                 set_ftrace_pid_function(func);
204                                 func = ftrace_pid_func;
205                         }
206 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
207                         ftrace_trace_function = func;
208 #else
209                         __ftrace_trace_function = func;
210 #endif
211                 }
212         }
213
214         return 0;
215 }
216
217 static void ftrace_update_pid_func(void)
218 {
219         ftrace_func_t func;
220
221         mutex_lock(&ftrace_lock);
222
223         if (ftrace_trace_function == ftrace_stub)
224                 goto out;
225
226         func = ftrace_trace_function;
227
228         if (ftrace_pid_trace) {
229                 set_ftrace_pid_function(func);
230                 func = ftrace_pid_func;
231         } else {
232                 if (func == ftrace_pid_func)
233                         func = ftrace_pid_function;
234         }
235
236 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
237         ftrace_trace_function = func;
238 #else
239         __ftrace_trace_function = func;
240 #endif
241
242  out:
243         mutex_unlock(&ftrace_lock);
244 }
245
246 #ifdef CONFIG_DYNAMIC_FTRACE
247 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
248 # error Dynamic ftrace depends on MCOUNT_RECORD
249 #endif
250
251 enum {
252         FTRACE_ENABLE_CALLS             = (1 << 0),
253         FTRACE_DISABLE_CALLS            = (1 << 1),
254         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
255         FTRACE_ENABLE_MCOUNT            = (1 << 3),
256         FTRACE_DISABLE_MCOUNT           = (1 << 4),
257         FTRACE_START_FUNC_RET           = (1 << 5),
258         FTRACE_STOP_FUNC_RET            = (1 << 6),
259 };
260
261 static int ftrace_filtered;
262
263 static LIST_HEAD(ftrace_new_addrs);
264
265 static DEFINE_MUTEX(ftrace_regex_lock);
266
267 struct ftrace_page {
268         struct ftrace_page      *next;
269         int                     index;
270         struct dyn_ftrace       records[];
271 };
272
273 #define ENTRIES_PER_PAGE \
274   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
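/*
 * Worked example (sizes are illustrative): on 64-bit, with a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace (ip,
 * flags, list_head), a 4096-byte page holds (4096 - 16) / 32 = 127
 * records.
 */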
275
276 /* estimate from running different kernels */
277 #define NR_TO_INIT              10000
278
279 static struct ftrace_page       *ftrace_pages_start;
280 static struct ftrace_page       *ftrace_pages;
281
282 static struct dyn_ftrace *ftrace_free_records;
283
284 /*
285  * This is a double for loop. 'break' would only exit the inner loop;
286  * you must use a goto to leave the walk early (see the sketch below).
287  */
288 #define do_for_each_ftrace_rec(pg, rec)                                 \
289         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
290                 int _____i;                                             \
291                 for (_____i = 0; _____i < pg->index; _____i++) {        \
292                         rec = &pg->records[_____i];
293
294 #define while_for_each_ftrace_rec()             \
295                 }                               \
296         }
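/*
 * Usage sketch (some_condition() is hypothetical): the pair expands
 * to nested loops, so 'break' would only leave the inner records loop:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (some_condition(rec))
 *			goto out;	-- 'break' would be wrong here
 *	} while_for_each_ftrace_rec();
 * out:
 *	...
 *
 * register_ftrace_function_hook() below uses exactly this pattern
 * with its out_unlock label.
 */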
297
298 #ifdef CONFIG_KPROBES
299
300 static int frozen_record_count;
301
302 static inline void freeze_record(struct dyn_ftrace *rec)
303 {
304         if (!(rec->flags & FTRACE_FL_FROZEN)) {
305                 rec->flags |= FTRACE_FL_FROZEN;
306                 frozen_record_count++;
307         }
308 }
309
310 static inline void unfreeze_record(struct dyn_ftrace *rec)
311 {
312         if (rec->flags & FTRACE_FL_FROZEN) {
313                 rec->flags &= ~FTRACE_FL_FROZEN;
314                 frozen_record_count--;
315         }
316 }
317
318 static inline int record_frozen(struct dyn_ftrace *rec)
319 {
320         return rec->flags & FTRACE_FL_FROZEN;
321 }
322 #else
323 # define freeze_record(rec)                     ({ 0; })
324 # define unfreeze_record(rec)                   ({ 0; })
325 # define record_frozen(rec)                     ({ 0; })
326 #endif /* CONFIG_KPROBES */
327
328 static void ftrace_free_rec(struct dyn_ftrace *rec)
329 {
330         rec->ip = (unsigned long)ftrace_free_records;
331         ftrace_free_records = rec;
332         rec->flags |= FTRACE_FL_FREE;
333 }
334
335 void ftrace_release(void *start, unsigned long size)
336 {
337         struct dyn_ftrace *rec;
338         struct ftrace_page *pg;
339         unsigned long s = (unsigned long)start;
340         unsigned long e = s + size;
341
342         if (ftrace_disabled || !start)
343                 return;
344
345         mutex_lock(&ftrace_lock);
346         do_for_each_ftrace_rec(pg, rec) {
347                 if ((rec->ip >= s) && (rec->ip < e))
348                         ftrace_free_rec(rec);
349         } while_for_each_ftrace_rec();
350         mutex_unlock(&ftrace_lock);
351 }
352
353 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
354 {
355         struct dyn_ftrace *rec;
356
357         /* First check for freed records */
358         if (ftrace_free_records) {
359                 rec = ftrace_free_records;
360
361                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
362                         FTRACE_WARN_ON_ONCE(1);
363                         ftrace_free_records = NULL;
364                         return NULL;
365                 }
366
367                 ftrace_free_records = (void *)rec->ip;
368                 memset(rec, 0, sizeof(*rec));
369                 return rec;
370         }
371
372         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
373                 if (!ftrace_pages->next) {
374                         /* allocate another page */
375                         ftrace_pages->next =
376                                 (void *)get_zeroed_page(GFP_KERNEL);
377                         if (!ftrace_pages->next)
378                                 return NULL;
379                 }
380                 ftrace_pages = ftrace_pages->next;
381         }
382
383         return &ftrace_pages->records[ftrace_pages->index++];
384 }
385
386 static struct dyn_ftrace *
387 ftrace_record_ip(unsigned long ip)
388 {
389         struct dyn_ftrace *rec;
390
391         if (ftrace_disabled)
392                 return NULL;
393
394         rec = ftrace_alloc_dyn_node(ip);
395         if (!rec)
396                 return NULL;
397
398         rec->ip = ip;
399
400         list_add(&rec->list, &ftrace_new_addrs);
401
402         return rec;
403 }
404
405 static void print_ip_ins(const char *fmt, unsigned char *p)
406 {
407         int i;
408
409         printk(KERN_CONT "%s", fmt);
410
411         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
412                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
413 }
414
415 static void ftrace_bug(int failed, unsigned long ip)
416 {
417         switch (failed) {
418         case -EFAULT:
419                 FTRACE_WARN_ON_ONCE(1);
420                 pr_info("ftrace faulted on modifying ");
421                 print_ip_sym(ip);
422                 break;
423         case -EINVAL:
424                 FTRACE_WARN_ON_ONCE(1);
425                 pr_info("ftrace failed to modify ");
426                 print_ip_sym(ip);
427                 print_ip_ins(" actual: ", (unsigned char *)ip);
428                 printk(KERN_CONT "\n");
429                 break;
430         case -EPERM:
431                 FTRACE_WARN_ON_ONCE(1);
432                 pr_info("ftrace faulted on writing ");
433                 print_ip_sym(ip);
434                 break;
435         default:
436                 FTRACE_WARN_ON_ONCE(1);
437                 pr_info("ftrace faulted on unknown error ");
438                 print_ip_sym(ip);
439         }
440 }
441
442
443 static int
444 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
445 {
446         unsigned long ip, fl;
447         unsigned long ftrace_addr;
448
449         ftrace_addr = (unsigned long)FTRACE_ADDR;
450
451         ip = rec->ip;
452
453         /*
454          * If this record is not to be traced and
455          * it is not enabled then do nothing.
456          *
457          * If this record is not to be traced and
458          * it is enabled then disable it.
459          *
460          */
461         if (rec->flags & FTRACE_FL_NOTRACE) {
462                 if (rec->flags & FTRACE_FL_ENABLED)
463                         rec->flags &= ~FTRACE_FL_ENABLED;
464                 else
465                         return 0;
466
467         } else if (ftrace_filtered && enable) {
468                 /*
469                  * Filtering is on:
470                  */
471
472                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
473
474                 /* Record is filtered and enabled, do nothing */
475                 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
476                         return 0;
477
478                 /* Record is neither filtered nor enabled, do nothing */
479                 if (!fl)
480                         return 0;
481
482                 /* Record is not filtered but enabled, disable it */
483                 if (fl == FTRACE_FL_ENABLED)
484                         rec->flags &= ~FTRACE_FL_ENABLED;
485                 else
486                 /* Otherwise record is filtered but not enabled, enable it */
487                         rec->flags |= FTRACE_FL_ENABLED;
488         } else {
489                 /* Disabling, or filtering is not in effect */
490
491                 if (enable) {
492                         /* if record is enabled, do nothing */
493                         if (rec->flags & FTRACE_FL_ENABLED)
494                                 return 0;
495
496                         rec->flags |= FTRACE_FL_ENABLED;
497
498                 } else {
499
500                         /* if record is not enabled, do nothing */
501                         if (!(rec->flags & FTRACE_FL_ENABLED))
502                                 return 0;
503
504                         rec->flags &= ~FTRACE_FL_ENABLED;
505                 }
506         }
507
508         if (rec->flags & FTRACE_FL_ENABLED)
509                 return ftrace_make_call(rec, ftrace_addr);
510         else
511                 return ftrace_make_nop(NULL, rec, ftrace_addr);
512 }
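/*
 * Summary of the filtering branch above (enable set, ftrace_filtered
 * set):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		already traced, do nothing
 *	  0	   0		not wanted, do nothing
 *	  0	   1		stray enable, clear ENABLED
 *	  1	   0		wanted but off, set ENABLED
 *
 * The resulting FTRACE_FL_ENABLED state then selects between
 * ftrace_make_call() and ftrace_make_nop().
 */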
513
514 static void ftrace_replace_code(int enable)
515 {
516         int failed;
517         struct dyn_ftrace *rec;
518         struct ftrace_page *pg;
519
520         do_for_each_ftrace_rec(pg, rec) {
521                 /*
522                  * Skip over free records and records that have
523                  * failed.
524                  */
525                 if (rec->flags & FTRACE_FL_FREE ||
526                     rec->flags & FTRACE_FL_FAILED)
527                         continue;
528
529                 /* ignore updates to this record's mcount site */
530                 if (get_kprobe((void *)rec->ip)) {
531                         freeze_record(rec);
532                         continue;
533                 } else {
534                         unfreeze_record(rec);
535                 }
536
537                 failed = __ftrace_replace_code(rec, enable);
538                 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
539                         rec->flags |= FTRACE_FL_FAILED;
540                         if ((system_state == SYSTEM_BOOTING) ||
541                             !core_kernel_text(rec->ip)) {
542                                 ftrace_free_rec(rec);
543                         } else
544                                 ftrace_bug(failed, rec->ip);
545                 }
546         } while_for_each_ftrace_rec();
547 }
548
549 static int
550 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
551 {
552         unsigned long ip;
553         int ret;
554
555         ip = rec->ip;
556
557         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
558         if (ret) {
559                 ftrace_bug(ret, ip);
560                 rec->flags |= FTRACE_FL_FAILED;
561                 return 0;
562         }
563         return 1;
564 }
565
566 static int __ftrace_modify_code(void *data)
567 {
568         int *command = data;
569
570         if (*command & FTRACE_ENABLE_CALLS)
571                 ftrace_replace_code(1);
572         else if (*command & FTRACE_DISABLE_CALLS)
573                 ftrace_replace_code(0);
574
575         if (*command & FTRACE_UPDATE_TRACE_FUNC)
576                 ftrace_update_ftrace_func(ftrace_trace_function);
577
578         if (*command & FTRACE_START_FUNC_RET)
579                 ftrace_enable_ftrace_graph_caller();
580         else if (*command & FTRACE_STOP_FUNC_RET)
581                 ftrace_disable_ftrace_graph_caller();
582
583         return 0;
584 }
585
586 static void ftrace_run_update_code(int command)
587 {
588         stop_machine(__ftrace_modify_code, &command, NULL);
589 }
590
591 static ftrace_func_t saved_ftrace_func;
592 static int ftrace_start_up;
593
594 static void ftrace_startup_enable(int command)
595 {
596         if (saved_ftrace_func != ftrace_trace_function) {
597                 saved_ftrace_func = ftrace_trace_function;
598                 command |= FTRACE_UPDATE_TRACE_FUNC;
599         }
600
601         if (!command || !ftrace_enabled)
602                 return;
603
604         ftrace_run_update_code(command);
605 }
606
607 static void ftrace_startup(int command)
608 {
609         if (unlikely(ftrace_disabled))
610                 return;
611
612         ftrace_start_up++;
613         command |= FTRACE_ENABLE_CALLS;
614
615         ftrace_startup_enable(command);
616 }
617
618 static void ftrace_shutdown(int command)
619 {
620         if (unlikely(ftrace_disabled))
621                 return;
622
623         ftrace_start_up--;
624         if (!ftrace_start_up)
625                 command |= FTRACE_DISABLE_CALLS;
626
627         if (saved_ftrace_func != ftrace_trace_function) {
628                 saved_ftrace_func = ftrace_trace_function;
629                 command |= FTRACE_UPDATE_TRACE_FUNC;
630         }
631
632         if (!command || !ftrace_enabled)
633                 return;
634
635         ftrace_run_update_code(command);
636 }
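/*
 * ftrace_start_up acts as a reference count: calls stay patched in
 * until the last user calls ftrace_shutdown(). A minimal sketch of a
 * user, assuming a caller-private my_ops (see the hook code below for
 * a real instance of this pairing):
 *
 *	__register_ftrace_function(&my_ops);
 *	ftrace_startup(0);
 *	...
 *	__unregister_ftrace_function(&my_ops);
 *	ftrace_shutdown(0);
 */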
637
638 static void ftrace_startup_sysctl(void)
639 {
640         int command = FTRACE_ENABLE_MCOUNT;
641
642         if (unlikely(ftrace_disabled))
643                 return;
644
645         /* Force update next time */
646         saved_ftrace_func = NULL;
647         /* ftrace_start_up is non-zero if we want ftrace running */
648         if (ftrace_start_up)
649                 command |= FTRACE_ENABLE_CALLS;
650
651         ftrace_run_update_code(command);
652 }
653
654 static void ftrace_shutdown_sysctl(void)
655 {
656         int command = FTRACE_DISABLE_MCOUNT;
657
658         if (unlikely(ftrace_disabled))
659                 return;
660
661         /* ftrace_start_up is non-zero if ftrace is running */
662         if (ftrace_start_up)
663                 command |= FTRACE_DISABLE_CALLS;
664
665         ftrace_run_update_code(command);
666 }
667
668 static cycle_t          ftrace_update_time;
669 static unsigned long    ftrace_update_cnt;
670 unsigned long           ftrace_update_tot_cnt;
671
672 static int ftrace_update_code(struct module *mod)
673 {
674         struct dyn_ftrace *p, *t;
675         cycle_t start, stop;
676
677         start = ftrace_now(raw_smp_processor_id());
678         ftrace_update_cnt = 0;
679
680         list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
681
682                 /* If something went wrong, bail without enabling anything */
683                 if (unlikely(ftrace_disabled))
684                         return -1;
685
686                 list_del_init(&p->list);
687
688                 /* convert record (i.e., patch the mcount call with a NOP) */
689                 if (ftrace_code_disable(mod, p)) {
690                         p->flags |= FTRACE_FL_CONVERTED;
691                         ftrace_update_cnt++;
692                 } else
693                         ftrace_free_rec(p);
694         }
695
696         stop = ftrace_now(raw_smp_processor_id());
697         ftrace_update_time = stop - start;
698         ftrace_update_tot_cnt += ftrace_update_cnt;
699
700         return 0;
701 }
702
703 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
704 {
705         struct ftrace_page *pg;
706         int cnt;
707         int i;
708
709         /* allocate a few pages */
710         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
711         if (!ftrace_pages_start)
712                 return -1;
713
714         /*
715          * Allocate a few more pages.
716          *
717          * TODO: have some parser search vmlinux before
718          *   final linking to find all calls to ftrace.
719          *   Then we can:
720          *    a) know how many pages to allocate.
721          *     and/or
722          *    b) set up the table then.
723          *
724          *  The dynamic code is still necessary for
725          *  modules.
726          */
727
728         pg = ftrace_pages = ftrace_pages_start;
729
730         cnt = num_to_init / ENTRIES_PER_PAGE;
731         pr_info("ftrace: allocating %ld entries in %d pages\n",
732                 num_to_init, cnt + 1);
733
734         for (i = 0; i < cnt; i++) {
735                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
736
737                 /* If we fail, we'll try later anyway */
738                 if (!pg->next)
739                         break;
740
741                 pg = pg->next;
742         }
743
744         return 0;
745 }
746
747 enum {
748         FTRACE_ITER_FILTER      = (1 << 0),
749         FTRACE_ITER_CONT        = (1 << 1),
750         FTRACE_ITER_NOTRACE     = (1 << 2),
751         FTRACE_ITER_FAILURES    = (1 << 3),
752         FTRACE_ITER_PRINTALL    = (1 << 4),
753 };
754
755 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
756
757 struct ftrace_iterator {
758         struct ftrace_page      *pg;
759         int                     idx;
760         unsigned                flags;
761         unsigned char           buffer[FTRACE_BUFF_MAX+1];
762         unsigned                buffer_idx;
763         unsigned                filtered;
764 };
765
766 static void *
767 t_next(struct seq_file *m, void *v, loff_t *pos)
768 {
769         struct ftrace_iterator *iter = m->private;
770         struct dyn_ftrace *rec = NULL;
771
772         (*pos)++;
773
774         if (iter->flags & FTRACE_ITER_PRINTALL)
775                 return NULL;
776
777         mutex_lock(&ftrace_lock);
778  retry:
779         if (iter->idx >= iter->pg->index) {
780                 if (iter->pg->next) {
781                         iter->pg = iter->pg->next;
782                         iter->idx = 0;
783                         goto retry;
784                 } else {
785                         iter->idx = -1;
786                 }
787         } else {
788                 rec = &iter->pg->records[iter->idx++];
789                 if ((rec->flags & FTRACE_FL_FREE) ||
790
791                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
792                      (rec->flags & FTRACE_FL_FAILED)) ||
793
794                     ((iter->flags & FTRACE_ITER_FAILURES) &&
795                      !(rec->flags & FTRACE_FL_FAILED)) ||
796
797                     ((iter->flags & FTRACE_ITER_FILTER) &&
798                      !(rec->flags & FTRACE_FL_FILTER)) ||
799
800                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
801                      !(rec->flags & FTRACE_FL_NOTRACE))) {
802                         rec = NULL;
803                         goto retry;
804                 }
805         }
806         mutex_unlock(&ftrace_lock);
807
808         return rec;
809 }
810
811 static void *t_start(struct seq_file *m, loff_t *pos)
812 {
813         struct ftrace_iterator *iter = m->private;
814         void *p = NULL;
815
816         /*
817          * For set_ftrace_filter reading, if we have the filter
818          * off, we can short cut and just print out that all
819          * functions are enabled.
820          */
821         if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
822                 if (*pos > 0)
823                         return NULL;
824                 iter->flags |= FTRACE_ITER_PRINTALL;
825                 (*pos)++;
826                 return iter;
827         }
828
829         if (*pos > 0) {
830                 if (iter->idx < 0)
831                         return p;
832                 (*pos)--;
833                 iter->idx--;
834         }
835
836         p = t_next(m, p, pos);
837
838         return p;
839 }
840
841 static void t_stop(struct seq_file *m, void *p)
842 {
843 }
844
845 static int t_show(struct seq_file *m, void *v)
846 {
847         struct ftrace_iterator *iter = m->private;
848         struct dyn_ftrace *rec = v;
849         char str[KSYM_SYMBOL_LEN];
850
851         if (iter->flags & FTRACE_ITER_PRINTALL) {
852                 seq_printf(m, "#### all functions enabled ####\n");
853                 return 0;
854         }
855
856         if (!rec)
857                 return 0;
858
859         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
860
861         seq_printf(m, "%s\n", str);
862
863         return 0;
864 }
865
866 static struct seq_operations show_ftrace_seq_ops = {
867         .start = t_start,
868         .next = t_next,
869         .stop = t_stop,
870         .show = t_show,
871 };
872
873 static int
874 ftrace_avail_open(struct inode *inode, struct file *file)
875 {
876         struct ftrace_iterator *iter;
877         int ret;
878
879         if (unlikely(ftrace_disabled))
880                 return -ENODEV;
881
882         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
883         if (!iter)
884                 return -ENOMEM;
885
886         iter->pg = ftrace_pages_start;
887
888         ret = seq_open(file, &show_ftrace_seq_ops);
889         if (!ret) {
890                 struct seq_file *m = file->private_data;
891
892                 m->private = iter;
893         } else {
894                 kfree(iter);
895         }
896
897         return ret;
898 }
899
900 int ftrace_avail_release(struct inode *inode, struct file *file)
901 {
902         struct seq_file *m = (struct seq_file *)file->private_data;
903         struct ftrace_iterator *iter = m->private;
904
905         seq_release(inode, file);
906         kfree(iter);
907
908         return 0;
909 }
910
911 static int
912 ftrace_failures_open(struct inode *inode, struct file *file)
913 {
914         int ret;
915         struct seq_file *m;
916         struct ftrace_iterator *iter;
917
918         ret = ftrace_avail_open(inode, file);
919         if (!ret) {
920                 m = (struct seq_file *)file->private_data;
921                 iter = (struct ftrace_iterator *)m->private;
922                 iter->flags = FTRACE_ITER_FAILURES;
923         }
924
925         return ret;
926 }
927
928
929 static void ftrace_filter_reset(int enable)
930 {
931         struct ftrace_page *pg;
932         struct dyn_ftrace *rec;
933         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
934
935         mutex_lock(&ftrace_lock);
936         if (enable)
937                 ftrace_filtered = 0;
938         do_for_each_ftrace_rec(pg, rec) {
939                 if (rec->flags & FTRACE_FL_FAILED)
940                         continue;
941                 rec->flags &= ~type;
942         } while_for_each_ftrace_rec();
943         mutex_unlock(&ftrace_lock);
944 }
945
946 static int
947 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
948 {
949         struct ftrace_iterator *iter;
950         int ret = 0;
951
952         if (unlikely(ftrace_disabled))
953                 return -ENODEV;
954
955         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
956         if (!iter)
957                 return -ENOMEM;
958
959         mutex_lock(&ftrace_regex_lock);
960         if ((file->f_mode & FMODE_WRITE) &&
961             !(file->f_flags & O_APPEND))
962                 ftrace_filter_reset(enable);
963
964         if (file->f_mode & FMODE_READ) {
965                 iter->pg = ftrace_pages_start;
966                 iter->flags = enable ? FTRACE_ITER_FILTER :
967                         FTRACE_ITER_NOTRACE;
968
969                 ret = seq_open(file, &show_ftrace_seq_ops);
970                 if (!ret) {
971                         struct seq_file *m = file->private_data;
972                         m->private = iter;
973                 } else
974                         kfree(iter);
975         } else
976                 file->private_data = iter;
977         mutex_unlock(&ftrace_regex_lock);
978
979         return ret;
980 }
981
982 static int
983 ftrace_filter_open(struct inode *inode, struct file *file)
984 {
985         return ftrace_regex_open(inode, file, 1);
986 }
987
988 static int
989 ftrace_notrace_open(struct inode *inode, struct file *file)
990 {
991         return ftrace_regex_open(inode, file, 0);
992 }
993
994 static ssize_t
995 ftrace_regex_read(struct file *file, char __user *ubuf,
996                        size_t cnt, loff_t *ppos)
997 {
998         if (file->f_mode & FMODE_READ)
999                 return seq_read(file, ubuf, cnt, ppos);
1000         else
1001                 return -EPERM;
1002 }
1003
1004 static loff_t
1005 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1006 {
1007         loff_t ret;
1008
1009         if (file->f_mode & FMODE_READ)
1010                 ret = seq_lseek(file, offset, origin);
1011         else
1012                 file->f_pos = ret = 1;
1013
1014         return ret;
1015 }
1016
1017 enum {
1018         MATCH_FULL,
1019         MATCH_FRONT_ONLY,
1020         MATCH_MIDDLE_ONLY,
1021         MATCH_END_ONLY,
1022 };
1023
1024 /*
1025  * (static function - no need for kernel doc)
1026  *
1027  * Pass in a buffer containing a glob and this function will
1028  * set search to point to the search part of the buffer and
1029  * return the type of search it is (see enum above).
1030  * This does modify buff.
1031  *
1032  * Returns enum type.
1033  *  search returns the pointer to use for comparison.
1034  *  not returns 1 if buff started with a '!'
1035  *     0 otherwise.
1036  */
1037 static int
1038 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1039 {
1040         int type = MATCH_FULL;
1041         int i;
1042
1043         if (buff[0] == '!') {
1044                 *not = 1;
1045                 buff++;
1046                 len--;
1047         } else
1048                 *not = 0;
1049
1050         *search = buff;
1051
1052         for (i = 0; i < len; i++) {
1053                 if (buff[i] == '*') {
1054                         if (!i) {
1055                                 *search = buff + 1;
1056                                 type = MATCH_END_ONLY;
1057                         } else {
1058                                 if (type == MATCH_END_ONLY)
1059                                         type = MATCH_MIDDLE_ONLY;
1060                                 else
1061                                         type = MATCH_FRONT_ONLY;
1062                                 buff[i] = 0;
1063                                 break;
1064                         }
1065                 }
1066         }
1067
1068         return type;
1069 }
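/*
 * Worked examples for the parser above:
 *
 *	buff		type			search	not
 *	"foo"		MATCH_FULL		"foo"	0
 *	"foo*"		MATCH_FRONT_ONLY	"foo"	0
 *	"*foo"		MATCH_END_ONLY		"foo"	0
 *	"*foo*"		MATCH_MIDDLE_ONLY	"foo"	0
 *	"!foo*"		MATCH_FRONT_ONLY	"foo"	1
 */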
1070
1071 static int ftrace_match(char *str, char *regex, int len, int type)
1072 {
1073         int matched = 0;
1074         char *ptr;
1075
1076         switch (type) {
1077         case MATCH_FULL:
1078                 if (strcmp(str, regex) == 0)
1079                         matched = 1;
1080                 break;
1081         case MATCH_FRONT_ONLY:
1082                 if (strncmp(str, regex, len) == 0)
1083                         matched = 1;
1084                 break;
1085         case MATCH_MIDDLE_ONLY:
1086                 if (strstr(str, regex))
1087                         matched = 1;
1088                 break;
1089         case MATCH_END_ONLY:
1090                 ptr = strstr(str, regex);
1091                 if (ptr && (ptr[len] == 0))
1092                         matched = 1;
1093                 break;
1094         }
1095
1096         return matched;
1097 }
1098
1099 static int
1100 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1101 {
1102         char str[KSYM_SYMBOL_LEN];
1103
1104         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1105         return ftrace_match(str, regex, len, type);
1106 }
1107
1108 static void ftrace_match_records(char *buff, int len, int enable)
1109 {
1110         char *search;
1111         struct ftrace_page *pg;
1112         struct dyn_ftrace *rec;
1113         int type;
1114         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1115         unsigned search_len;
1116         int not;
1117
1118         type = ftrace_setup_glob(buff, len, &search, &not);
1119
1120         search_len = strlen(search);
1121
1122         mutex_lock(&ftrace_lock);
1123         do_for_each_ftrace_rec(pg, rec) {
1124
1125                 if (rec->flags & FTRACE_FL_FAILED)
1126                         continue;
1127
1128                 if (ftrace_match_record(rec, search, search_len, type)) {
1129                         if (not)
1130                                 rec->flags &= ~flag;
1131                         else
1132                                 rec->flags |= flag;
1133                 }
1134                 /*
1135                  * Only enable filtering if we have a function that
1136                  * is filtered on.
1137                  */
1138                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1139                         ftrace_filtered = 1;
1140         } while_for_each_ftrace_rec();
1141         mutex_unlock(&ftrace_lock);
1142 }
1143
1144 static int
1145 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1146                            char *regex, int len, int type)
1147 {
1148         char str[KSYM_SYMBOL_LEN];
1149         char *modname;
1150
1151         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1152
1153         if (!modname || strcmp(modname, mod))
1154                 return 0;
1155
1156         /* blank search means to match all funcs in the mod */
1157         if (len)
1158                 return ftrace_match(str, regex, len, type);
1159         else
1160                 return 1;
1161 }
1162
1163 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1164 {
1165         char *search = buff;
1166         struct ftrace_page *pg;
1167         struct dyn_ftrace *rec;
1168         int type = MATCH_FULL;
1169         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1170         unsigned search_len = 0;
1171         int not = 0;
1172
1173         /* blank or '*' mean the same */
1174         if (strcmp(buff, "*") == 0)
1175                 buff[0] = 0;
1176
1177         /* handle the case of "don't filter this module" */
1178         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1179                 buff[0] = 0;
1180                 not = 1;
1181         }
1182
1183         if (strlen(buff)) {
1184                 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1185                 search_len = strlen(search);
1186         }
1187
1188         mutex_lock(&ftrace_lock);
1189         do_for_each_ftrace_rec(pg, rec) {
1190
1191                 if (rec->flags & FTRACE_FL_FAILED)
1192                         continue;
1193
1194                 if (ftrace_match_module_record(rec, mod,
1195                                                search, search_len, type)) {
1196                         if (not)
1197                                 rec->flags &= ~flag;
1198                         else
1199                                 rec->flags |= flag;
1200                 }
1201                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1202                         ftrace_filtered = 1;
1203
1204         } while_for_each_ftrace_rec();
1205         mutex_unlock(&ftrace_lock);
1206 }
1207
1208 /*
1209  * We register the module command as a template to show others how
1210  * to register a command as well (see the sketch below ftrace_mod_cmd_init).
1211  */
1212
1213 static int
1214 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1215 {
1216         char *mod;
1217
1218         /*
1219          * cmd == 'mod' because we only registered this func
1220          * for the 'mod' ftrace_func_command.
1221          * But if you register one func with multiple commands,
1222          * you can tell which command was used by the cmd
1223          * parameter.
1224          */
1225
1226         /* we must have a module name */
1227         if (!param)
1228                 return -EINVAL;
1229
1230         mod = strsep(&param, ":");
1231         if (!strlen(mod))
1232                 return -EINVAL;
1233
1234         ftrace_match_module_records(func, mod, enable);
1235         return 0;
1236 }
1237
1238 static struct ftrace_func_command ftrace_mod_cmd = {
1239         .name                   = "mod",
1240         .func                   = ftrace_mod_callback,
1241 };
1242
1243 static int __init ftrace_mod_cmd_init(void)
1244 {
1245         return register_ftrace_command(&ftrace_mod_cmd);
1246 }
1247 device_initcall(ftrace_mod_cmd_init);
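/*
 * Following that template, a new command (hypothetical name and
 * callback, shown only as a sketch) is wired up the same way:
 *
 *	static int ftrace_my_callback(char *func, char *cmd,
 *				      char *param, int enable)
 *	{
 *		-- act on every function matching 'func'
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_my_cmd = {
 *		.name	= "mycmd",
 *		.func	= ftrace_my_callback,
 *	};
 *
 *	ret = register_ftrace_command(&ftrace_my_cmd);
 *
 * after which "<func>:mycmd:<param>" written to set_ftrace_filter
 * reaches ftrace_my_callback().
 */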
1248
1249 #define FTRACE_HASH_BITS 7
1250 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
1251 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1252
1253 struct ftrace_func_hook {
1254         struct hlist_node       node;
1255         struct ftrace_hook_ops  *ops;
1256         unsigned long           flags;
1257         unsigned long           ip;
1258         void                    *data;
1259         struct rcu_head         rcu;
1260 };
1261
1262 static void
1263 function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
1264 {
1265         struct ftrace_func_hook *entry;
1266         struct hlist_head *hhd;
1267         struct hlist_node *n;
1268         unsigned long key;
1269         int resched;
1270
1271         key = hash_long(ip, FTRACE_HASH_BITS);
1272
1273         hhd = &ftrace_func_hash[key];
1274
1275         if (hlist_empty(hhd))
1276                 return;
1277
1278         /*
1279          * Disable preemption for these calls to prevent an RCU grace
1280          * period. This syncs the hash iteration and freeing of items
1281          * on the hash. rcu_read_lock is too dangerous here.
1282          */
1283         resched = ftrace_preempt_disable();
1284         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1285                 if (entry->ip == ip)
1286                         entry->ops->func(ip, parent_ip, &entry->data);
1287         }
1288         ftrace_preempt_enable(resched);
1289 }
1290
1291 static struct ftrace_ops trace_hook_ops __read_mostly =
1292 {
1293         .func = function_trace_hook_call,
1294 };
1295
1296 static int ftrace_hook_registered;
1297
1298 static void __enable_ftrace_function_hook(void)
1299 {
1300         int i;
1301
1302         if (ftrace_hook_registered)
1303                 return;
1304
1305         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1306                 struct hlist_head *hhd = &ftrace_func_hash[i];
1307                 if (hhd->first)
1308                         break;
1309         }
1310         /* Nothing registered? */
1311         if (i == FTRACE_FUNC_HASHSIZE)
1312                 return;
1313
1314         __register_ftrace_function(&trace_hook_ops);
1315         ftrace_startup(0);
1316         ftrace_hook_registered = 1;
1317 }
1318
1319 static void __disable_ftrace_function_hook(void)
1320 {
1321         int i;
1322
1323         if (!ftrace_hook_registered)
1324                 return;
1325
1326         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1327                 struct hlist_head *hhd = &ftrace_func_hash[i];
1328                 if (hhd->first)
1329                         return;
1330         }
1331
1332         /* no more funcs left */
1333         __unregister_ftrace_function(&trace_hook_ops);
1334         ftrace_shutdown(0);
1335         ftrace_hook_registered = 0;
1336 }
1337
1338
1339 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1340 {
1341         struct ftrace_func_hook *entry =
1342                 container_of(rhp, struct ftrace_func_hook, rcu);
1343
1344         if (entry->ops->free)
1345                 entry->ops->free(&entry->data);
1346         kfree(entry);
1347 }
1348
1349
1350 int
1351 register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1352                               void *data)
1353 {
1354         struct ftrace_func_hook *entry;
1355         struct ftrace_page *pg;
1356         struct dyn_ftrace *rec;
1357         unsigned long key;
1358         int type, len, not;
1359         int count = 0;
1360         char *search;
1361
1362         type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1363         len = strlen(search);
1364
1365         /* we do not support '!' for function hooks */
1366         if (WARN_ON(not))
1367                 return -EINVAL;
1368
1369         mutex_lock(&ftrace_lock);
1370         do_for_each_ftrace_rec(pg, rec) {
1371
1372                 if (rec->flags & FTRACE_FL_FAILED)
1373                         continue;
1374
1375                 if (!ftrace_match_record(rec, search, len, type))
1376                         continue;
1377
1378                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1379                 if (!entry) {
1380                         /* If we did not hook to any, then return error */
1381                         if (!count)
1382                                 count = -ENOMEM;
1383                         goto out_unlock;
1384                 }
1385
1386                 count++;
1387
1388                 entry->data = data;
1389
1390                 /*
1391                  * The caller might want to do something special
1392                  * for each function we find. We call the callback
1393                  * to give the caller an opportunity to do so.
1394                  */
1395                 if (ops->callback) {
1396                         if (ops->callback(rec->ip, &entry->data) < 0) {
1397                                 /* caller does not like this func */
1398                                 kfree(entry);
1399                                 continue;
1400                         }
1401                 }
1402
1403                 entry->ops = ops;
1404                 entry->ip = rec->ip;
1405
1406                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1407                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1408
1409         } while_for_each_ftrace_rec();
1410         __enable_ftrace_function_hook();
1411
1412  out_unlock:
1413         mutex_unlock(&ftrace_lock);
1414
1415         return count;
1416 }
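/*
 * Usage sketch (my_hook and my_hook_ops are hypothetical): call a
 * handler whenever a function matching the glob is hit; 'data' is
 * handed back to each callback by reference:
 *
 *	static void my_hook(unsigned long ip, unsigned long parent_ip,
 *			    void **data)
 *	{
 *		-- runs from the traced function's mcount call
 *	}
 *
 *	static struct ftrace_hook_ops my_hook_ops = {
 *		.func = my_hook,
 *	};
 *
 *	ret = register_ftrace_function_hook("schedule*", &my_hook_ops,
 *					    NULL);
 *
 * The return value is the number of functions hooked (possibly 0),
 * or a negative errno on failure.
 */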
1417
1418 enum {
1419         HOOK_TEST_FUNC          = 1,
1420         HOOK_TEST_DATA          = 2
1421 };
1422
1423 static void
1424 __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1425                                   void *data, int flags)
1426 {
1427         struct ftrace_func_hook *entry;
1428         struct hlist_node *n, *tmp;
1429         char str[KSYM_SYMBOL_LEN];
1430         int type = MATCH_FULL;
1431         int i, len = 0;
1432         char *search;
1433
1434         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
1435                 glob = NULL;
1436         else {
1437                 int not;
1438
1439                 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1440                 len = strlen(search);
1441
1442                 /* we do not support '!' for function hooks */
1443                 if (WARN_ON(not))
1444                         return;
1445         }
1446
1447         mutex_lock(&ftrace_lock);
1448         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1449                 struct hlist_head *hhd = &ftrace_func_hash[i];
1450
1451                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1452
1453                         /* break up if statements for readability */
1454                         if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
1455                                 continue;
1456
1457                         if ((flags & HOOK_TEST_DATA) && entry->data != data)
1458                                 continue;
1459
1460                         /* do this last, since it is the most expensive */
1461                         if (glob) {
1462                                 kallsyms_lookup(entry->ip, NULL, NULL,
1463                                                 NULL, str);
1464                                 if (!ftrace_match(str, glob, len, type))
1465                                         continue;
1466                         }
1467
1468                         hlist_del(&entry->node);
1469                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1470                 }
1471         }
1472         __disable_ftrace_function_hook();
1473         mutex_unlock(&ftrace_lock);
1474 }
1475
1476 void
1477 unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1478                                 void *data)
1479 {
1480         __unregister_ftrace_function_hook(glob, ops, data,
1481                                           HOOK_TEST_FUNC | HOOK_TEST_DATA);
1482 }
1483
1484 void
1485 unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
1486 {
1487         __unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
1488 }
1489
1490 void unregister_ftrace_function_hook_all(char *glob)
1491 {
1492         __unregister_ftrace_function_hook(glob, NULL, NULL, 0);
1493 }
1494
1495 static LIST_HEAD(ftrace_commands);
1496 static DEFINE_MUTEX(ftrace_cmd_mutex);
1497
1498 int register_ftrace_command(struct ftrace_func_command *cmd)
1499 {
1500         struct ftrace_func_command *p;
1501         int ret = 0;
1502
1503         mutex_lock(&ftrace_cmd_mutex);
1504         list_for_each_entry(p, &ftrace_commands, list) {
1505                 if (strcmp(cmd->name, p->name) == 0) {
1506                         ret = -EBUSY;
1507                         goto out_unlock;
1508                 }
1509         }
1510         list_add(&cmd->list, &ftrace_commands);
1511  out_unlock:
1512         mutex_unlock(&ftrace_cmd_mutex);
1513
1514         return ret;
1515 }
1516
1517 int unregister_ftrace_command(struct ftrace_func_command *cmd)
1518 {
1519         struct ftrace_func_command *p, *n;
1520         int ret = -ENODEV;
1521
1522         mutex_lock(&ftrace_cmd_mutex);
1523         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1524                 if (strcmp(cmd->name, p->name) == 0) {
1525                         ret = 0;
1526                         list_del_init(&p->list);
1527                         goto out_unlock;
1528                 }
1529         }
1530  out_unlock:
1531         mutex_unlock(&ftrace_cmd_mutex);
1532
1533         return ret;
1534 }
1535
1536 static int ftrace_process_regex(char *buff, int len, int enable)
1537 {
1538         struct ftrace_func_command *p;
1539         char *func, *command, *next = buff;
1540         int ret = -EINVAL;
1541
1542         func = strsep(&next, ":");
1543
1544         if (!next) {
1545                 ftrace_match_records(func, len, enable);
1546                 return 0;
1547         }
1548
1549         /* command found */
1550
1551         command = strsep(&next, ":");
1552
1553         mutex_lock(&ftrace_cmd_mutex);
1554         list_for_each_entry(p, &ftrace_commands, list) {
1555                 if (strcmp(p->name, command) == 0) {
1556                         ret = p->func(func, command, next, enable);
1557                         goto out_unlock;
1558                 }
1559         }
1560  out_unlock:
1561         mutex_unlock(&ftrace_cmd_mutex);
1562
1563         return ret;
1564 }
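/*
 * The written string is parsed as <func>[:<command>[:<param>]]. With
 * the "mod" command registered above, for example:
 *
 *	# echo '*write*:mod:ext3' > set_ftrace_filter
 *
 * splits into func = "*write*", command = "mod", param = "ext3" and
 * lands in ftrace_mod_callback(), which filters on every ext3
 * function containing "write".
 */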
1565
1566 static ssize_t
1567 ftrace_regex_write(struct file *file, const char __user *ubuf,
1568                    size_t cnt, loff_t *ppos, int enable)
1569 {
1570         struct ftrace_iterator *iter;
1571         char ch;
1572         size_t read = 0;
1573         ssize_t ret;
1574
1575         if (!cnt)
1576                 return 0;
1577
1578         mutex_lock(&ftrace_regex_lock);
1579
1580         if (file->f_mode & FMODE_READ) {
1581                 struct seq_file *m = file->private_data;
1582                 iter = m->private;
1583         } else
1584                 iter = file->private_data;
1585
1586         if (!*ppos) {
1587                 iter->flags &= ~FTRACE_ITER_CONT;
1588                 iter->buffer_idx = 0;
1589         }
1590
1591         ret = get_user(ch, ubuf++);
1592         if (ret)
1593                 goto out;
1594         read++;
1595         cnt--;
1596
1597         if (!(iter->flags & FTRACE_ITER_CONT)) {
1598                 /* skip white space */
1599                 while (cnt && isspace(ch)) {
1600                         ret = get_user(ch, ubuf++);
1601                         if (ret)
1602                                 goto out;
1603                         read++;
1604                         cnt--;
1605                 }
1606
1607                 if (isspace(ch)) {
1608                         file->f_pos += read;
1609                         ret = read;
1610                         goto out;
1611                 }
1612
1613                 iter->buffer_idx = 0;
1614         }
1615
1616         while (cnt && !isspace(ch)) {
1617                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1618                         iter->buffer[iter->buffer_idx++] = ch;
1619                 else {
1620                         ret = -EINVAL;
1621                         goto out;
1622                 }
1623                 ret = get_user(ch, ubuf++);
1624                 if (ret)
1625                         goto out;
1626                 read++;
1627                 cnt--;
1628         }
1629
1630         if (isspace(ch)) {
1631                 iter->filtered++;
1632                 iter->buffer[iter->buffer_idx] = 0;
1633                 ret = ftrace_process_regex(iter->buffer,
1634                                            iter->buffer_idx, enable);
1635                 if (ret)
1636                         goto out;
1637                 iter->buffer_idx = 0;
1638         } else
1639                 iter->flags |= FTRACE_ITER_CONT;
1640
1641
1642         file->f_pos += read;
1643
1644         ret = read;
1645  out:
1646         mutex_unlock(&ftrace_regex_lock);
1647
1648         return ret;
1649 }
1650
1651 static ssize_t
1652 ftrace_filter_write(struct file *file, const char __user *ubuf,
1653                     size_t cnt, loff_t *ppos)
1654 {
1655         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1656 }
1657
1658 static ssize_t
1659 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1660                      size_t cnt, loff_t *ppos)
1661 {
1662         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1663 }
1664
1665 static void
1666 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1667 {
1668         if (unlikely(ftrace_disabled))
1669                 return;
1670
1671         mutex_lock(&ftrace_regex_lock);
1672         if (reset)
1673                 ftrace_filter_reset(enable);
1674         if (buf)
1675                 ftrace_match_records(buf, len, enable);
1676         mutex_unlock(&ftrace_regex_lock);
1677 }
1678
1679 /**
1680  * ftrace_set_filter - set a function to filter on in ftrace
1681  * @buf: the string that holds the function filter text.
1682  * @len: the length of the string.
1683  * @reset: non-zero to reset all filters before applying this filter.
1684  *
1685  * Filters denote which functions should be enabled when tracing is enabled.
1686  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1687  */
1688 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1689 {
1690         ftrace_set_regex(buf, len, reset, 1);
1691 }
1692
1693 /**
1694  * ftrace_set_notrace - set a function to not trace in ftrace
1695  * @buf: the string that holds the function notrace text.
1696  * @len: the length of the string.
1697  * @reset: non-zero to reset all filters before applying this filter.
1698  *
1699  * Notrace Filters denote which functions should not be enabled when tracing
1700  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1701  * for tracing.
1702  */
1703 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1704 {
1705         ftrace_set_regex(buf, len, reset, 0);
1706 }
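/*
 * In-kernel usage sketch (the patterns are illustrative):
 *
 *	-- trace only the scheduler entry points
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 *
 *	-- but never this one (notrace wins over filter)
 *	ftrace_set_notrace("schedule_tail", strlen("schedule_tail"), 1);
 */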
1707
1708 static int
1709 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1710 {
1711         struct seq_file *m = (struct seq_file *)file->private_data;
1712         struct ftrace_iterator *iter;
1713
1714         mutex_lock(&ftrace_regex_lock);
1715         if (file->f_mode & FMODE_READ) {
1716                 iter = m->private;
1717
1718                 seq_release(inode, file);
1719         } else
1720                 iter = file->private_data;
1721
1722         if (iter->buffer_idx) {
1723                 iter->filtered++;
1724                 iter->buffer[iter->buffer_idx] = 0;
1725                 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
1726         }
1727
1728         mutex_lock(&ftrace_lock);
1729         if (ftrace_start_up && ftrace_enabled)
1730                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1731         mutex_unlock(&ftrace_lock);
1732
1733         kfree(iter);
1734         mutex_unlock(&ftrace_regex_lock);
1735         return 0;
1736 }
1737
1738 static int
1739 ftrace_filter_release(struct inode *inode, struct file *file)
1740 {
1741         return ftrace_regex_release(inode, file, 1);
1742 }
1743
1744 static int
1745 ftrace_notrace_release(struct inode *inode, struct file *file)
1746 {
1747         return ftrace_regex_release(inode, file, 0);
1748 }
1749
1750 static struct file_operations ftrace_avail_fops = {
1751         .open = ftrace_avail_open,
1752         .read = seq_read,
1753         .llseek = seq_lseek,
1754         .release = ftrace_avail_release,
1755 };
1756
1757 static struct file_operations ftrace_failures_fops = {
1758         .open = ftrace_failures_open,
1759         .read = seq_read,
1760         .llseek = seq_lseek,
1761         .release = ftrace_avail_release,
1762 };
1763
1764 static const struct file_operations ftrace_filter_fops = {
1765         .open = ftrace_filter_open,
1766         .read = ftrace_regex_read,
1767         .write = ftrace_filter_write,
1768         .llseek = ftrace_regex_lseek,
1769         .release = ftrace_filter_release,
1770 };
1771
1772 static const struct file_operations ftrace_notrace_fops = {
1773         .open = ftrace_notrace_open,
1774         .read = ftrace_regex_read,
1775         .write = ftrace_notrace_write,
1776         .llseek = ftrace_regex_lseek,
1777         .release = ftrace_notrace_release,
1778 };
1779
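/*
 * Illustrative usage of the files above from user space. The debugfs
 * mount point is a convention assumed here, not something this file
 * controls:
 *
 *	# mount -t debugfs nodev /sys/kernel/debug
 *	# echo 'hrtimer_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo schedule >> /sys/kernel/debug/tracing/set_ftrace_notrace
 *	# cat /sys/kernel/debug/tracing/available_filter_functions
 *
 * A plain '>' truncates (resets) the list, '>>' appends to it.
 */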
1780 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1781
1782 static DEFINE_MUTEX(graph_lock);
1783
1784 int ftrace_graph_count;
1785 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1786
1787 static void *
1788 g_next(struct seq_file *m, void *v, loff_t *pos)
1789 {
1790         unsigned long *array = m->private;
1791         int index = *pos;
1792
1793         (*pos)++;
1794
1795         if (index >= ftrace_graph_count)
1796                 return NULL;
1797
1798         return &array[index];
1799 }
1800
1801 static void *g_start(struct seq_file *m, loff_t *pos)
1802 {
1803         void *p = NULL;
1804
1805         mutex_lock(&graph_lock);
1806
1807         p = g_next(m, p, pos);
1808
1809         return p;
1810 }
1811
1812 static void g_stop(struct seq_file *m, void *p)
1813 {
1814         mutex_unlock(&graph_lock);
1815 }
1816
1817 static int g_show(struct seq_file *m, void *v)
1818 {
1819         unsigned long *ptr = v;
1820         char str[KSYM_SYMBOL_LEN];
1821
1822         if (!ptr)
1823                 return 0;
1824
1825         kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1826
1827         seq_printf(m, "%s\n", str);
1828
1829         return 0;
1830 }
1831
1832 static const struct seq_operations ftrace_graph_seq_ops = {
1833         .start = g_start,
1834         .next = g_next,
1835         .stop = g_stop,
1836         .show = g_show,
1837 };
1838
1839 static int
1840 ftrace_graph_open(struct inode *inode, struct file *file)
1841 {
1842         int ret = 0;
1843
1844         if (unlikely(ftrace_disabled))
1845                 return -ENODEV;
1846
1847         mutex_lock(&graph_lock);
1848         if ((file->f_mode & FMODE_WRITE) &&
1849             !(file->f_flags & O_APPEND)) {
1850                 ftrace_graph_count = 0;
1851                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1852         }
1853
1854         if (file->f_mode & FMODE_READ) {
1855                 ret = seq_open(file, &ftrace_graph_seq_ops);
1856                 if (!ret) {
1857                         struct seq_file *m = file->private_data;
1858                         m->private = ftrace_graph_funcs;
1859                 }
1860         } else
1861                 file->private_data = ftrace_graph_funcs;
1862         mutex_unlock(&graph_lock);
1863
1864         return ret;
1865 }
1866
1867 static ssize_t
1868 ftrace_graph_read(struct file *file, char __user *ubuf,
1869                        size_t cnt, loff_t *ppos)
1870 {
1871         if (file->f_mode & FMODE_READ)
1872                 return seq_read(file, ubuf, cnt, ppos);
1873         else
1874                 return -EPERM;
1875 }
1876
1877 static int
1878 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1879 {
1880         char str[KSYM_SYMBOL_LEN];
1881         struct dyn_ftrace *rec;
1882         struct ftrace_page *pg;
1883         int found = 0;
1884         int j;
1885
1886         if (ftrace_disabled)
1887                 return -ENODEV;
1888
1889         mutex_lock(&ftrace_lock);
1890         do_for_each_ftrace_rec(pg, rec) {
1891
1892                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1893                         continue;
1894
1895                 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1896                 if (strcmp(str, buffer) == 0) {
1897                         /* add it to the array, unless it is already there */
1898                         found = 1;
1899                         for (j = 0; j < idx; j++)
1900                                 if (array[j] == rec->ip) {
1901                                         found = 0;
1902                                         break;
1903                                 }
1904                         if (found)
1905                                 array[idx] = rec->ip;
1906                         goto out;
1907                 }
1908         } while_for_each_ftrace_rec();
1909  out:
1910         mutex_unlock(&ftrace_lock);
1911
1912         return found ? 0 : -EINVAL;
1913 }
1914
1915 static ssize_t
1916 ftrace_graph_write(struct file *file, const char __user *ubuf,
1917                    size_t cnt, loff_t *ppos)
1918 {
1919         char buffer[FTRACE_BUFF_MAX+1];
1920         unsigned long *array;
1921         size_t read = 0;
1922         ssize_t ret;
1923         int index = 0;
1924         char ch;
1925
1926         if (!cnt)
1927                 return 0;
1928
1929         mutex_lock(&graph_lock);
1930
1931         if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1932                 ret = -EBUSY;
1933                 goto out;
1934         }
1935
1936         if (file->f_mode & FMODE_READ) {
1937                 struct seq_file *m = file->private_data;
1938                 array = m->private;
1939         } else
1940                 array = file->private_data;
1941
1942         ret = get_user(ch, ubuf++);
1943         if (ret)
1944                 goto out;
1945         read++;
1946         cnt--;
1947
1948         /* skip white space */
1949         while (cnt && isspace(ch)) {
1950                 ret = get_user(ch, ubuf++);
1951                 if (ret)
1952                         goto out;
1953                 read++;
1954                 cnt--;
1955         }
1956
1957         if (isspace(ch)) {
1958                 *ppos += read;
1959                 ret = read;
1960                 goto out;
1961         }
1962
1963         while (cnt && !isspace(ch)) {
1964                 if (index < FTRACE_BUFF_MAX)
1965                         buffer[index++] = ch;
1966                 else {
1967                         ret = -EINVAL;
1968                         goto out;
1969                 }
1970                 ret = get_user(ch, ubuf++);
1971                 if (ret)
1972                         goto out;
1973                 read++;
1974                 cnt--;
1975         }
1976         buffer[index] = 0;
1977
1978         /* we allow only one function name per write */
1979         ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1980         if (ret)
1981                 goto out;
1982
1983         ftrace_graph_count++;
1984
1985         *ppos += read;
1986
1987         ret = read;
1988  out:
1989         mutex_unlock(&graph_lock);
1990
1991         return ret;
1992 }
1993
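/*
 * Illustrative usage (one function name per write, as enforced
 * above); the debugfs path is the conventional mount point:
 *
 *	# echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *	# echo schedule >> /sys/kernel/debug/tracing/set_graph_function
 *	# cat /sys/kernel/debug/tracing/set_graph_function
 */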
1994 static const struct file_operations ftrace_graph_fops = {
1995         .open = ftrace_graph_open,
1996         .read = ftrace_graph_read,
1997         .write = ftrace_graph_write,
1998 };
1999 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2000
2001 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2002 {
2003         struct dentry *entry;
2004
2005         entry = debugfs_create_file("available_filter_functions", 0444,
2006                                     d_tracer, NULL, &ftrace_avail_fops);
2007         if (!entry)
2008                 pr_warning("Could not create debugfs "
2009                            "'available_filter_functions' entry\n");
2010
2011         entry = debugfs_create_file("failures", 0444,
2012                                     d_tracer, NULL, &ftrace_failures_fops);
2013         if (!entry)
2014                 pr_warning("Could not create debugfs 'failures' entry\n");
2015
2016         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2017                                     NULL, &ftrace_filter_fops);
2018         if (!entry)
2019                 pr_warning("Could not create debugfs "
2020                            "'set_ftrace_filter' entry\n");
2021
2022         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2023                                     NULL, &ftrace_notrace_fops);
2024         if (!entry)
2025                 pr_warning("Could not create debugfs "
2026                            "'set_ftrace_notrace' entry\n");
2027
2028 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2029         entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
2030                                     NULL,
2031                                     &ftrace_graph_fops);
2032         if (!entry)
2033                 pr_warning("Could not create debugfs "
2034                            "'set_graph_function' entry\n");
2035 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2036
2037         return 0;
2038 }
2039
2040 static int ftrace_convert_nops(struct module *mod,
2041                                unsigned long *start,
2042                                unsigned long *end)
2043 {
2044         unsigned long *p;
2045         unsigned long addr;
2046         unsigned long flags;
2047
2048         mutex_lock(&ftrace_lock);
2049         p = start;
2050         while (p < end) {
2051                 addr = ftrace_call_adjust(*p++);
2052                 /*
2053                  * Some architecture linkers will pad between
2054                  * the different mcount_loc sections of different
2055                  * object files to satisfy alignments.
2056                  * Skip any NULL pointers.
2057                  */
2058                 if (!addr)
2059                         continue;
2060                 ftrace_record_ip(addr);
2061         }
2062
2063         /* disable interrupts to prevent kstop machine */
2064         local_irq_save(flags);
2065         ftrace_update_code(mod);
2066         local_irq_restore(flags);
2067         mutex_unlock(&ftrace_lock);
2068
2069         return 0;
2070 }
2071
2072 void ftrace_init_module(struct module *mod,
2073                         unsigned long *start, unsigned long *end)
2074 {
2075         if (ftrace_disabled || start == end)
2076                 return;
2077         ftrace_convert_nops(mod, start, end);
2078 }
2079
2080 extern unsigned long __start_mcount_loc[];
2081 extern unsigned long __stop_mcount_loc[];
2082
2083 void __init ftrace_init(void)
2084 {
2085         unsigned long count, addr, flags;
2086         int ret;
2087
2088         /* Keep the ftrace pointer to the stub */
2089         addr = (unsigned long)ftrace_stub;
2090
2091         local_irq_save(flags);
2092         ftrace_dyn_arch_init(&addr);
2093         local_irq_restore(flags);
2094
2095         /* ftrace_dyn_arch_init places the return code in addr */
2096         if (addr)
2097                 goto failed;
2098
2099         count = __stop_mcount_loc - __start_mcount_loc;
2100
2101         ret = ftrace_dyn_table_alloc(count);
2102         if (ret)
2103                 goto failed;
2104
2105         last_ftrace_enabled = ftrace_enabled = 1;
2106
2107         ret = ftrace_convert_nops(NULL,
2108                                   __start_mcount_loc,
2109                                   __stop_mcount_loc);
2110
2111         return;
2112  failed:
2113         ftrace_disabled = 1;
2114 }
2115
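/*
 * For reference: __start_mcount_loc/__stop_mcount_loc bound the table
 * of mcount call sites that the linker collects from every object
 * file. A minimal sketch of the linker-script fragment (the real one
 * lives in include/asm-generic/vmlinux.lds.h):
 *
 *	__start_mcount_loc = .;
 *	*(__mcount_loc)
 *	__stop_mcount_loc = .;
 *
 * Modules bring their own __mcount_loc range, which is handed to
 * ftrace_convert_nops() via ftrace_init_module() above.
 */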
2116 #else
2117
2118 static int __init ftrace_nodyn_init(void)
2119 {
2120         ftrace_enabled = 1;
2121         return 0;
2122 }
2123 device_initcall(ftrace_nodyn_init);
2124
2125 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2126 static inline void ftrace_startup_enable(int command) { }
2127 /* Keep as macros so we do not need to define the commands */
2128 # define ftrace_startup(command)        do { } while (0)
2129 # define ftrace_shutdown(command)       do { } while (0)
2130 # define ftrace_startup_sysctl()        do { } while (0)
2131 # define ftrace_shutdown_sysctl()       do { } while (0)
2132 #endif /* CONFIG_DYNAMIC_FTRACE */
2133
2134 static ssize_t
2135 ftrace_pid_read(struct file *file, char __user *ubuf,
2136                        size_t cnt, loff_t *ppos)
2137 {
2138         char buf[64];
2139         int r;
2140
2141         if (ftrace_pid_trace == ftrace_swapper_pid)
2142                 r = sprintf(buf, "swapper tasks\n");
2143         else if (ftrace_pid_trace)
2144                 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
2145         else
2146                 r = sprintf(buf, "no pid\n");
2147
2148         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2149 }
2150
2151 static void clear_ftrace_swapper(void)
2152 {
2153         struct task_struct *p;
2154         int cpu;
2155
2156         get_online_cpus();
2157         for_each_online_cpu(cpu) {
2158                 p = idle_task(cpu);
2159                 clear_tsk_trace_trace(p);
2160         }
2161         put_online_cpus();
2162 }
2163
2164 static void set_ftrace_swapper(void)
2165 {
2166         struct task_struct *p;
2167         int cpu;
2168
2169         get_online_cpus();
2170         for_each_online_cpu(cpu) {
2171                 p = idle_task(cpu);
2172                 set_tsk_trace_trace(p);
2173         }
2174         put_online_cpus();
2175 }
2176
2177 static void clear_ftrace_pid(struct pid *pid)
2178 {
2179         struct task_struct *p;
2180
2181         rcu_read_lock();
2182         do_each_pid_task(pid, PIDTYPE_PID, p) {
2183                 clear_tsk_trace_trace(p);
2184         } while_each_pid_task(pid, PIDTYPE_PID, p);
2185         rcu_read_unlock();
2186
2187         put_pid(pid);
2188 }
2189
2190 static void set_ftrace_pid(struct pid *pid)
2191 {
2192         struct task_struct *p;
2193
2194         rcu_read_lock();
2195         do_each_pid_task(pid, PIDTYPE_PID, p) {
2196                 set_tsk_trace_trace(p);
2197         } while_each_pid_task(pid, PIDTYPE_PID, p);
2198         rcu_read_unlock();
2199 }
2200
2201 static void clear_ftrace_pid_task(struct pid **pid)
2202 {
2203         if (*pid == ftrace_swapper_pid)
2204                 clear_ftrace_swapper();
2205         else
2206                 clear_ftrace_pid(*pid);
2207
2208         *pid = NULL;
2209 }
2210
2211 static void set_ftrace_pid_task(struct pid *pid)
2212 {
2213         if (pid == ftrace_swapper_pid)
2214                 set_ftrace_swapper();
2215         else
2216                 set_ftrace_pid(pid);
2217 }
2218
2219 static ssize_t
2220 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2221                    size_t cnt, loff_t *ppos)
2222 {
2223         struct pid *pid;
2224         char buf[64];
2225         long val;
2226         int ret;
2227
2228         if (cnt >= sizeof(buf))
2229                 return -EINVAL;
2230
2231         if (copy_from_user(&buf, ubuf, cnt))
2232                 return -EFAULT;
2233
2234         buf[cnt] = 0;
2235
2236         ret = strict_strtol(buf, 10, &val);
2237         if (ret < 0)
2238                 return ret;
2239
2240         mutex_lock(&ftrace_lock);
2241         if (val < 0) {
2242                 /* disable pid tracing */
2243                 if (!ftrace_pid_trace)
2244                         goto out;
2245
2246                 clear_ftrace_pid_task(&ftrace_pid_trace);
2247
2248         } else {
2249                 /* swapper task is special */
2250                 if (!val) {
2251                         pid = ftrace_swapper_pid;
2252                         if (pid == ftrace_pid_trace)
2253                                 goto out;
2254                 } else {
2255                         pid = find_get_pid(val);
2256
2257                         if (pid == ftrace_pid_trace) {
2258                                 put_pid(pid);
2259                                 goto out;
2260                         }
2261                 }
2262
2263                 if (ftrace_pid_trace)
2264                         clear_ftrace_pid_task(&ftrace_pid_trace);
2265
2266                 if (!pid)
2267                         goto out;
2268
2269                 ftrace_pid_trace = pid;
2270
2271                 set_ftrace_pid_task(ftrace_pid_trace);
2272         }
2273
2274         /* update the function call */
2275         ftrace_update_pid_func();
2276         ftrace_startup_enable(0);
2277
2278  out:
2279         mutex_unlock(&ftrace_lock);
2280
2281         return cnt;
2282 }
2283
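/*
 * Illustrative usage, mirroring the parsing above: a positive value
 * selects one pid, 0 selects the per-cpu idle (swapper) tasks, and a
 * negative value turns pid filtering off again:
 *
 *	# echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 */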
2284 static const struct file_operations ftrace_pid_fops = {
2285         .read = ftrace_pid_read,
2286         .write = ftrace_pid_write,
2287 };
2288
2289 static __init int ftrace_init_debugfs(void)
2290 {
2291         struct dentry *d_tracer;
2292         struct dentry *entry;
2293
2294         d_tracer = tracing_init_dentry();
2295         if (!d_tracer)
2296                 return 0;
2297
2298         ftrace_init_dyn_debugfs(d_tracer);
2299
2300         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2301                                     NULL, &ftrace_pid_fops);
2302         if (!entry)
2303                 pr_warning("Could not create debugfs "
2304                            "'set_ftrace_pid' entry\n");
2305         return 0;
2306 }
2307
2308 fs_initcall(ftrace_init_debugfs);
2309
2310 /**
2311  * ftrace_kill - kill ftrace
2312  *
2313  * This function should be used by panic code. It stops ftrace
2314  * but in a not so nice way: no locks are taken and nothing is
2315  * cleaned up, which is what makes it usable from atomic context.
2316  */
2317 void ftrace_kill(void)
2318 {
2319         ftrace_disabled = 1;
2320         ftrace_enabled = 0;
2321         clear_ftrace_function();
2322 }
2323
2324 /**
2325  * register_ftrace_function - register a function for profiling
2326  * @ops: ops structure that holds the function for profiling.
2327  *
2328  * Register a function to be called by all functions in the
2329  * kernel.
2330  *
2331  * Note: @ops->func and all the functions it calls must be labeled
2332  *       with "notrace", otherwise it will go into a
2333  *       recursive loop.
2334  */
2335 int register_ftrace_function(struct ftrace_ops *ops)
2336 {
2337         int ret;
2338
2339         if (unlikely(ftrace_disabled))
2340                 return -ENODEV;
2341
2342         mutex_lock(&ftrace_lock);
2343
2344         ret = __register_ftrace_function(ops);
2345         ftrace_startup(0);
2346
2347         mutex_unlock(&ftrace_lock);
2348         return ret;
2349 }
2350
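/*
 * Minimal registration sketch with a hypothetical callback. As the
 * comment above warns, the callback and everything it calls must be
 * notrace, or the tracer ends up tracing itself:
 *
 *	static atomic_t my_hit_count;
 *
 *	static void notrace my_func(unsigned long ip,
 *				    unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */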
2351 /**
2352  * unregister_ftrace_function - unregister a function for profiling.
2353  * @ops: ops structure that holds the function to unregister
2354  *
2355  * Unregister a function that was added to be called by ftrace profiling.
2356  */
2357 int unregister_ftrace_function(struct ftrace_ops *ops)
2358 {
2359         int ret;
2360
2361         mutex_lock(&ftrace_lock);
2362         ret = __unregister_ftrace_function(ops);
2363         ftrace_shutdown(0);
2364         mutex_unlock(&ftrace_lock);
2365
2366         return ret;
2367 }
2368
2369 int
2370 ftrace_enable_sysctl(struct ctl_table *table, int write,
2371                      struct file *file, void __user *buffer, size_t *lenp,
2372                      loff_t *ppos)
2373 {
2374         int ret;
2375
2376         if (unlikely(ftrace_disabled))
2377                 return -ENODEV;
2378
2379         mutex_lock(&ftrace_lock);
2380
2381         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
2382
2383         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2384                 goto out;
2385
2386         last_ftrace_enabled = ftrace_enabled;
2387
2388         if (ftrace_enabled) {
2389
2390                 ftrace_startup_sysctl();
2391
2392                 /* we are starting ftrace again */
2393                 if (ftrace_list != &ftrace_list_end) {
2394                         if (ftrace_list->next == &ftrace_list_end)
2395                                 ftrace_trace_function = ftrace_list->func;
2396                         else
2397                                 ftrace_trace_function = ftrace_list_func;
2398                 }
2399
2400         } else {
2401                 /* stopping ftrace calls (just send to ftrace_stub) */
2402                 ftrace_trace_function = ftrace_stub;
2403
2404                 ftrace_shutdown_sysctl();
2405         }
2406
2407  out:
2408         mutex_unlock(&ftrace_lock);
2409         return ret;
2410 }
2411
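/*
 * This handler sits behind the "ftrace_enabled" sysctl, so the whole
 * function tracer can be toggled from user space:
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled
 *	# sysctl -w kernel.ftrace_enabled=1
 */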
2412 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2413
2414 static atomic_t ftrace_graph_active;
2415 static struct notifier_block ftrace_suspend_notifier;
2416
2417 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2418 {
2419         return 0;
2420 }
2421
2422 /* The callbacks that hook a function */
2423 trace_func_graph_ret_t ftrace_graph_return =
2424                         (trace_func_graph_ret_t)ftrace_stub;
2425 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2426
2427 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2428 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2429 {
2430         int i;
2431         int ret = 0;
2432         unsigned long flags;
2433         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2434         struct task_struct *g, *t;
2435
2436         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2437                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2438                                         * sizeof(struct ftrace_ret_stack),
2439                                         GFP_KERNEL);
2440                 if (!ret_stack_list[i]) {
2441                         start = 0;
2442                         end = i;
2443                         ret = -ENOMEM;
2444                         goto free;
2445                 }
2446         }
2447
2448         read_lock_irqsave(&tasklist_lock, flags);
2449         do_each_thread(g, t) {
2450                 if (start == end) {
2451                         ret = -EAGAIN;
2452                         goto unlock;
2453                 }
2454
2455                 if (t->ret_stack == NULL) {
2456                         t->curr_ret_stack = -1;
2457                         /* Make sure IRQs see the -1 first: */
2458                         barrier();
2459                         t->ret_stack = ret_stack_list[start++];
2460                         atomic_set(&t->tracing_graph_pause, 0);
2461                         atomic_set(&t->trace_overrun, 0);
2462                 }
2463         } while_each_thread(g, t);
2464
2465 unlock:
2466         read_unlock_irqrestore(&tasklist_lock, flags);
2467 free:
2468         for (i = start; i < end; i++)
2469                 kfree(ret_stack_list[i]);
2470         return ret;
2471 }
2472
2473 /* Allocate a return stack for each task */
2474 static int start_graph_tracing(void)
2475 {
2476         struct ftrace_ret_stack **ret_stack_list;
2477         int ret;
2478
2479         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2480                                 sizeof(struct ftrace_ret_stack *),
2481                                 GFP_KERNEL);
2482
2483         if (!ret_stack_list)
2484                 return -ENOMEM;
2485
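        /*
         * Stacks cannot be allocated while tasklist_lock is held, so
         * they are handed out in batches of FTRACE_RETSTACK_ALLOC_SIZE;
         * -EAGAIN from alloc_retstack_tasklist() means the batch ran out
         * before every task was covered, so go round again for the rest.
         */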
2486         do {
2487                 ret = alloc_retstack_tasklist(ret_stack_list);
2488         } while (ret == -EAGAIN);
2489
2490         kfree(ret_stack_list);
2491         return ret;
2492 }
2493
2494 /*
2495  * Hibernation protection.
2496  * The state of the current task is too unstable during suspend
2497  * and resume to disk, so pause graph tracing while hibernating.
2498  */
2499 static int
2500 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2501                                                         void *unused)
2502 {
2503         switch (state) {
2504         case PM_HIBERNATION_PREPARE:
2505                 pause_graph_tracing();
2506                 break;
2507
2508         case PM_POST_HIBERNATION:
2509                 unpause_graph_tracing();
2510                 break;
2511         }
2512         return NOTIFY_DONE;
2513 }
2514
2515 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2516                         trace_func_graph_ent_t entryfunc)
2517 {
2518         int ret = 0;
2519
2520         mutex_lock(&ftrace_lock);
2521
2522         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2523         register_pm_notifier(&ftrace_suspend_notifier);
2524
2525         atomic_inc(&ftrace_graph_active);
2526         ret = start_graph_tracing();
2527         if (ret) {
2528                 atomic_dec(&ftrace_graph_active);
2529                 goto out;
2530         }
2531
2532         ftrace_graph_return = retfunc;
2533         ftrace_graph_entry = entryfunc;
2534
2535         ftrace_startup(FTRACE_START_FUNC_RET);
2536
2537 out:
2538         mutex_unlock(&ftrace_lock);
2539         return ret;
2540 }
2541
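/*
 * Sketch of a hypothetical pair of hooks. The entry callback follows
 * ftrace_graph_entry_stub() above; returning nonzero asks for the
 * matching return event, returning 0 drops this call:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */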
2542 void unregister_ftrace_graph(void)
2543 {
2544         mutex_lock(&ftrace_lock);
2545
2546         atomic_dec(&ftrace_graph_active);
2547         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2548         ftrace_graph_entry = ftrace_graph_entry_stub;
2549         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2550         unregister_pm_notifier(&ftrace_suspend_notifier);
2551
2552         mutex_unlock(&ftrace_lock);
2553 }
2554
2555 /* Allocate a return stack for newly created task */
2556 void ftrace_graph_init_task(struct task_struct *t)
2557 {
2558         if (atomic_read(&ftrace_graph_active)) {
2559                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2560                                 * sizeof(struct ftrace_ret_stack),
2561                                 GFP_KERNEL);
2562                 if (!t->ret_stack)
2563                         return;
2564                 t->curr_ret_stack = -1;
2565                 atomic_set(&t->tracing_graph_pause, 0);
2566                 atomic_set(&t->trace_overrun, 0);
2567         } else
2568                 t->ret_stack = NULL;
2569 }
2570
2571 void ftrace_graph_exit_task(struct task_struct *t)
2572 {
2573         struct ftrace_ret_stack *ret_stack = t->ret_stack;
2574
2575         t->ret_stack = NULL;
2576         /* NULL must become visible to IRQs before we free it: */
2577         barrier();
2578
2579         kfree(ret_stack);
2580 }
2581
2582 void ftrace_graph_stop(void)
2583 {
2584         ftrace_stop();
2585 }
2586 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2587