ftrace: freeze kprobe'd records
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a brief lag before every CPU
 * stops calling the previous function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
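
/*
 * Illustrative sketch of the barrier pairing used above (editor's
 * annotation, not original code): the smp_wmb() in
 * __register_ftrace_function() orders the store to ops->next before the
 * store to ftrace_list, and the read_barrier_depends() calls in
 * ftrace_list_func() are the read-side counterpart, so a walker that
 * sees the new list head also sees a valid ->next pointer.
 *
 *      writer                          reader
 *      ------                          ------
 *      ops->next = ftrace_list;        op = ftrace_list;
 *      smp_wmb();                      read_barrier_depends();
 *      ftrace_list = ops;              use op->next
 */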

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
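
/*
 * Worked example under assumed sizes (editor's annotation, illustrative
 * only): with 4 KiB pages, a 16-byte struct ftrace_page header and a
 * hypothetical 32-byte struct dyn_ftrace on a 64-bit build,
 * ENTRIES_PER_PAGE comes to (4096 - 16) / 32 = 127 records per page.
 */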

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */
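
/*
 * Illustrative sketch of the intended interplay (editor's annotation;
 * the hook names below are hypothetical, not kernel API): when a kprobe
 * is armed on an mcount call site, the record is frozen so that ftrace
 * leaves the kprobe's breakpoint alone; disarming unfreezes it so
 * ftrace may patch the site again.
 */
#if 0
static void arm_kprobe_at_mcount_site(struct dyn_ftrace *rec)
{
        freeze_record(rec);     /* ftrace must not touch this site now */
}

static void disarm_kprobe_at_mcount_site(struct dyn_ftrace *rec)
{
        unfreeze_record(rec);   /* ftrace may patch the site again */
}
#endif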

int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
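
/*
 * Note on the trick above (editor's annotation): freed records form an
 * intrusive LIFO free list. A freed record's ip is no longer
 * meaningful, so the field is reused as the "next" pointer, and
 * FTRACE_FL_FREE marks the record so ftrace_alloc_dyn_node() can
 * sanity check it before reuse.
 */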

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
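
/*
 * A compact restatement of the filtered-enable cases above (editor's
 * annotation), with fl = flags & (FILTER | NOTRACE | ENABLED):
 *
 *      FILTER|ENABLED          -> already enabled, nothing to do
 *      FILTER|NOTRACE          -> notrace wins, leave alone
 *      0 or NOTRACE            -> already disabled, nothing to do
 *      FILTER                  -> enable the call site
 *      any other combination
 *      with ENABLED set        -> disable the call site
 */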

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
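
/*
 * Note (editor's annotation): stop_machine_run() executes
 * __ftrace_modify_code() while every other CPU spins in a known state,
 * so the kernel text can be patched without another CPU executing the
 * instructions mid-rewrite.
 */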

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
                        /*
                         * Skip over failed records which have not been
                         * freed.
                         */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /*
                         * Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ).
                         */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};
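
/*
 * The above follows the standard seq_file contract (editor's
 * annotation): ->start() positions the iterator, ->next() advances it,
 * ->show() prints one record and ->stop() is the cleanup hook (a no-op
 * here, since t_next() takes no locks).
 */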

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
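
/*
 * Examples of how ftrace_match() below classifies patterns (editor's
 * annotation):
 *
 *      "foo"   matches exactly         (MATCH_FULL)
 *      "foo*"  matches a prefix        (MATCH_FRONT_ONLY)
 *      "*foo"  matches a suffix        (MATCH_END_ONLY)
 *      "*foo*" matches a substring     (MATCH_MIDDLE_ONLY)
 */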

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)       /* cnt is unsigned, so it can never be negative */
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
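
/*
 * Minimal usage sketch (editor's annotation; hypothetical caller,
 * illustrative only) that limits tracing to a single function. The
 * buffer must be writable, since ftrace_match() truncates it at a
 * wildcard in place.
 */
#if 0
static void __init example_set_filter(void)
{
        static char func[] = "schedule";

        ftrace_set_filter(func, strlen(func), 1); /* reset, then filter */
}
#endif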

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If an anomaly is detected, calling this
 * function keeps ftrace from doing any more modifications or updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
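
/*
 * Minimal registration sketch (editor's annotation; hypothetical names,
 * illustrative only). The callback must be notrace, or tracing it would
 * recurse.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* called on entry to (nearly) every traced kernel function */
}

static struct ftrace_ops my_trace_ops = {
        .func = my_trace_func,
};

static int __init my_tracer_init(void)
{
        return register_ftrace_function(&my_trace_ops);
}
#endif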

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}