kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before every CPU stops calling
 * the old function, since nothing synchronizes against them.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
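
/*
 * The smp_wmb() above pairs with the read_barrier_depends() calls in
 * ftrace_list_func(): a CPU that observes the new ftrace_list pointer
 * is then also guaranteed to observe the ops->next value that was
 * stored before the pointer was published.
 */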

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};
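
/*
 * These command flags are OR'd together and handed to
 * ftrace_run_update_code(); for example, ftrace_force_shutdown()
 * below issues:
 *
 *      ftrace_run_update_code(FTRACE_DISABLE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC);
 */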

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000
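
/*
 * Rough numbers (sizes are arch dependent): with 4K pages, a 16-byte
 * struct ftrace_page header and, say, a 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE is (4096 - 16) / 32 = 127, so NR_TO_INIT pre-sizes
 * the table at about 10000 / 127 = 78 extra pages.
 */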

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
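
/*
 * Note the trick above: a freed record's ->ip field doubles as the
 * "next" pointer of the free list, so the list needs no extra storage.
 * ftrace_alloc_dyn_node() undoes this by reading ->ip back as a record
 * pointer when it recycles the entry.
 */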

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
        } else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
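
/*
 * stop_machine_run() executes __ftrace_modify_code() with all other
 * CPUs parked in a known state, so the kernel text can be patched
 * without any CPU executing the instructions being rewritten.
 */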

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}


static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
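
/*
 * Wildcard forms understood by ftrace_match() (a simplified glob):
 *
 *      "func"          MATCH_FULL              matches exactly "func"
 *      "func*"         MATCH_FRONT_ONLY        begins with "func"
 *      "*func"         MATCH_END_ONLY          ends with "func"
 *      "*func*"        MATCH_MIDDLE_ONLY       contains "func"
 */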

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;


        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
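
/*
 * Example (sketch): restrict tracing to the scheduler wakeup paths,
 * clearing any previously installed filter first:
 *
 *      ftrace_set_filter("*wake_up*", strlen("*wake_up*"), 1);
 */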

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}
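
/*
 * From user space the daemon is driven through the "ftraced_enabled"
 * debugfs file created below (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *      echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *      echo 1       > /sys/kernel/debug/tracing/ftraced_enabled
 */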

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from making any
 * further code modifications or updates. It is used when something
 * has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
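
/*
 * Example (sketch) of a minimal registration; my_trace_func and my_ops
 * are hypothetical names, and the callback plus anything it calls must
 * be notrace or mcount will recurse into the tracer:
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              (record ip somewhere; must not sleep)
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */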

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

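/*
 * This handler backs the "ftrace_enabled" sysctl; from user space
 * (the path is an assumption based on the standard sysctl layout):
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled
 */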
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}