kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is the switch that turns ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing.  There may be some lag before every CPU stops calling
 * the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included in the ftrace_list.  This
         * smp_wmb() pairs with the read_barrier_depends() in
         * ftrace_list_func().
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

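        /*
         * Walk the list using a pointer to the link field itself, so
         * that unlinking ops below is a single pointer assignment.
         */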
        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};
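
/*
 * These command bits may be OR'd together and passed to
 * ftrace_run_update_code(), e.g.
 *
 *      ftrace_run_update_code(FTRACE_DISABLE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC);
 *
 * as ftrace_force_shutdown() does below.
 */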

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
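
/*
 * Illustrative arithmetic only: assuming 4 KB pages and a dyn_ftrace
 * of roughly 32 bytes (both depend on the architecture and config),
 * each ftrace_page holds on the order of a hundred records.
 */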

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

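/*
 * Freed records are kept on a simple free list, chained through the
 * otherwise unused ->ip field.  FTRACE_FL_FREE marks a record as
 * freed so that ftrace_alloc_dyn_node() can sanity check reuse.
 */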
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race in which the ftraced daemon may have
         * already updated the hash and converted this ip.  If it has
         * been converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                unsigned long fl;
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0))
                        return;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable) {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed) {
                unsigned long key;
                /* It is possible that the function hasn't been converted yet */
                key = hash_long(ip, FTRACE_HASHBITS);
                if (!ftrace_ip_in_hash(ip, key)) {
                        rec->flags |= FTRACE_FL_FAILED;
                        ftrace_free_rec(rec);
                }
        }
}

static void ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                ftrace_free_rec(rec);
        }
}

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled))
                return;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                ftraced_iteration_counter++;
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                wake_up_interruptible(&ftraced_waiters);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static void ftrace_filter_reset(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_filter_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset();

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = FTRACE_ITER_FILTER;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
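
/*
 * A '*' in the filter string selects the match type, e.g.:
 *
 *      "sched"     MATCH_FULL        (exact match)
 *      "sched*"    MATCH_FRONT_ONLY  (prefix match)
 *      "*sched"    MATCH_END_ONLY    (suffix match)
 *      "*sched*"   MATCH_MIDDLE_ONLY (substring match)
 */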

static void
ftrace_match(unsigned char *buff, int len)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_filter_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and @reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_filter_lock);
        if (reset)
                ftrace_filter_reset();
        if (buf)
                ftrace_match(buf, len);
        mutex_unlock(&ftrace_filter_lock);
}
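
/*
 * A minimal usage sketch (illustrative only): a tracer that wants to
 * trace nothing but the scheduler might do
 *
 *      ftrace_set_filter("sched*", strlen("sched*"), 1);
 *
 * Passing 1 for @reset first drops any previously set filters.
 */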

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_filter_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_filter_lock);
        return 0;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_filter_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
        unsigned long last_counter;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftraced_lock);
        last_counter = ftraced_iteration_counter;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftraced_waiters, &wait);

        if (unlikely(!ftraced_task)) {
                ret = -ENODEV;
                goto out;
        }

        do {
                mutex_unlock(&ftraced_lock);
                wake_up_process(ftraced_task);
                schedule();
                mutex_lock(&ftraced_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
        } while (last_counter == ftraced_iteration_counter);

 out:
        mutex_unlock(&ftraced_lock);
        remove_wait_queue(&ftraced_waiters, &wait);
        set_current_state(TASK_RUNNING);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);
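
/*
 * With debugfs mounted, the files above can be driven from user space
 * (a sketch; the exact path depends on where the tracing directory is
 * mounted):
 *
 *      cat available_filter_functions
 *      echo 'sched*' > set_ftrace_filter
 */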

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications and updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
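
/*
 * A minimal usage sketch (hypothetical tracer, not part of this file).
 * The callback and everything it calls must be notrace:
 *
 *      static void notrace my_trace(unsigned long ip,
 *                                   unsigned long parent_ip)
 *      {
 *              ... record ip and parent_ip somewhere safe ...
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly =
 *      {
 *              .func = my_trace,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */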

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}