ftrace: prevent freeing of all failed updates
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}
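
/*
 * Illustrative sketch (not part of the original file): with two
 * registered ops A and B (B registered after A), the list looks like
 *
 *      ftrace_list -> B -> A -> &ftrace_list_end
 *
 * so ftrace_list_func() above calls B->func(ip, parent_ip), then
 * A->func(ip, parent_ip), and stops at the list-end sentinel.
 */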

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag between this call and the
 * moment all CPUs stop calling the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}
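
/*
 * A note on the removal loop above (illustrative, not in the original
 * file): it walks pointer-to-pointer, starting at &ftrace_list and
 * following &(*p)->next, so the unlink is the single assignment
 *
 *      *p = (*p)->next;
 *
 * whether ops sits at the head or in the middle of the list -- no
 * special case for the head entry is needed.
 */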

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
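
/*
 * Worked example (sizes are illustrative, not guaranteed): on a 64-bit
 * box with 4K pages the two-field header above is 16 bytes; if
 * struct dyn_ftrace is 32 bytes (an hlist_node plus ip and flags),
 * ENTRIES_PER_PAGE works out to (4096 - 16) / 32 = 127 records per page.
 */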

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
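
/*
 * Illustrative sketch of the free list used above (not in the original
 * file): freed records are chained through their own ->ip field, so the
 * list costs no extra memory:
 *
 *      ftrace_free_records -> recA
 *      recA->ip = (unsigned long)recB          (next free record)
 *      recB->ip = 0                            (end of the free list)
 *
 * ftrace_alloc_dyn_node() pops recA, advances ftrace_free_records to
 * recB, and wipes the record with memset() before reuse.
 */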

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
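
/*
 * Note (illustrative, not in the original file): the per-CPU counter
 * above doubles as a recursion guard. ftrace_record_ip() is reached
 * from mcount, so any traced function it calls on this CPU re-enters
 * it; the nested call sees the counter already above 1 and bails out
 * at the "!= 1" check instead of recursing.
 */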

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
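
/*
 * The filtered+enable cases above as a table (restating the comment in
 * the function body; not in the original file):
 *
 *      FILTER  ENABLED NOTRACE action
 *        1       1       -     nothing (already tracing)
 *        0       0       -     nothing (stays off)
 *        -       -       1     nothing (never trace)
 *        1       0       0     patch in the call to ftrace_caller
 *        0       1       0     patch the call back to a nop
 */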

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}
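
/*
 * Note on the failure path above (not in the original file): a failed
 * record is freed only when it can never become valid again -- while
 * the system is still booting, or when the address is not core kernel
 * text (e.g. code from an unloaded module). Failures in core kernel
 * text are kept, marked FTRACE_FL_FAILED, so they remain visible
 * through the debugfs 'failures' file.
 */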

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
        } else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
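
/*
 * Illustrative note (not in the original file): stop_machine_run()
 * executes __ftrace_modify_code() with every other CPU quiesced, so the
 * text patching never races against a CPU executing the instructions
 * being rewritten. A typical caller ORs commands together, e.g.:
 *
 *      ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC);
 */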

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
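
/*
 * How a pattern maps to a match type (illustrative, based on the parser
 * in ftrace_match() below):
 *
 *      "schedule"      -> MATCH_FULL           exact symbol name
 *      "sched*"        -> MATCH_FRONT_ONLY     prefix match
 *      "*lock"         -> MATCH_END_ONLY       suffix match
 *      "*lock*"        -> MATCH_MIDDLE_ONLY    substring match
 */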

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt || cnt < 0)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
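
/*
 * Example use (illustrative, not in the original file). Note that
 * ftrace_match() may write a NUL into the buffer to strip a trailing
 * '*', so the pattern must live in writable memory, not in a string
 * literal:
 *
 *      unsigned char buf[] = "spin_*";
 *
 *      ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 */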

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}
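
/*
 * From user space (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *      echo 1       > /sys/kernel/debug/tracing/ftraced_enabled
 *
 * Both the words "enable"/"disable" and numeric 0/1 are accepted by the
 * parser above.
 */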

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If an anomaly is detected, calling this
 * function keeps ftrace from doing any further code modifications
 * or updates. It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
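
/*
 * Example (illustrative sketch, not in the original file): a minimal
 * callback that counts hits. Everything the callback reaches must be
 * notrace, as the comment above warns.
 *
 *      static atomic_t my_hits;
 *
 *      static void notrace my_tracer(unsigned long ip,
 *                                    unsigned long parent_ip)
 *      {
 *              atomic_inc(&my_hits);
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_tracer,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */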

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}