tracing/kprobe: Drop function argument access syntax
[safe/jmp/linux-2.6] / kernel / trace / trace_kprobe.c
1 /*
2  * Kprobes-based tracing events
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 #include <linux/kprobes.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/debugfs.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/ctype.h>
30 #include <linux/ptrace.h>
31 #include <linux/perf_event.h>
32
33 #include "trace.h"
34 #include "trace_output.h"
35
36 #define MAX_TRACE_ARGS 128
37 #define MAX_ARGSTR_LEN 63
38 #define MAX_EVENT_NAME_LEN 64
39 #define KPROBE_EVENT_SYSTEM "kprobes"
40
/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"

/*
 * Field names a user-supplied argument alias may not use: the common_*
 * fields that every trace event record carries, plus the fixed fields of
 * kprobe/kretprobe event records above.  Checked by conflict_field_name().
 */
const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};
59
/*
 * A single "fetch" operation: func extracts one unsigned long from the
 * probed context (registers, stack, memory, ...), parameterized by data
 * (a register offset, stack index, address, or helper-specific struct).
 */
struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

/* Invoke a fetch operation against the current register state. */
static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}
70
/* fetch handlers */

/* Fetch a register's value; data encodes the register offset (from %NAME). */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}
77
/* Fetch the Nth entry of the kernel stack; data encodes N (from $stackN). */
static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}
84
85 static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
86 {
87         unsigned long retval;
88
89         if (probe_kernel_address(addr, retval))
90                 return 0;
91         return retval;
92 }
93
/* Fetch the function return value ($retval); only valid on kretprobes. */
static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}
99
/* Fetch the current kernel stack pointer ($stack with no index). */
static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
105
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;		/* kstrdup'ed symbol name (owned) */
	long offset;		/* +/- offset applied to the resolved address */
	unsigned long addr;	/* cached symbol address + offset, 0 if unresolved */
};
112
113 static unsigned long update_symbol_cache(struct symbol_cache *sc)
114 {
115         sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
116         if (sc->addr)
117                 sc->addr += sc->offset;
118         return sc->addr;
119 }
120
/* Release a symbol_cache and its owned symbol string. */
static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}
126
127 static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
128 {
129         struct symbol_cache *sc;
130
131         if (!sym || strlen(sym) == 0)
132                 return NULL;
133         sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
134         if (!sc)
135                 return NULL;
136
137         sc->symbol = kstrdup(sym, GFP_KERNEL);
138         if (!sc->symbol) {
139                 kfree(sc);
140                 return NULL;
141         }
142         sc->offset = offset;
143
144         update_symbol_cache(sc);
145         return sc;
146 }
147
148 static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
149 {
150         struct symbol_cache *sc = data;
151
152         if (sc->addr)
153                 return fetch_memory(regs, (void *)sc->addr);
154         else
155                 return 0;
156 }
157
/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;	/* inner fetch producing the base address */
	long offset;		/* displacement added before dereferencing */
};
163
164 static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
165 {
166         struct indirect_fetch_data *ind = data;
167         unsigned long addr;
168
169         addr = call_fetch(&ind->orig, regs);
170         if (addr) {
171                 addr += ind->offset;
172                 return fetch_memory(regs, (void *)addr);
173         } else
174                 return 0;
175 }
176
/*
 * Free an indirect_fetch_data, recursing through nested indirect fetches
 * and releasing any symbol_cache held by the innermost fetch.
 */
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
185
186 /**
187  * Kprobe event core functions
188  */
189
/* One event argument: how to fetch it, plus its user-visible field name. */
struct probe_arg {
	struct fetch_func	fetch;
	const char		*name;	/* kstrdup'ed alias (owned) */
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2
198
/*
 * A dynamically created kprobe/kretprobe event.  The kretprobe 'rp' is
 * embedded unconditionally; plain kprobes use rp.kp and leave rp.handler
 * NULL (see probe_is_return()).  args[] is a flexible array of nr_args
 * entries, sized at allocation via SIZEOF_TRACE_PROBE().
 */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;	/* hit counter for the profile file */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event		event;
	unsigned int		nr_args;
	struct probe_arg	args[];
};

/* Allocation size for a trace_probe carrying n args. */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
214
/* A kretprobe was requested iff a return handler was installed. */
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}
219
/* Symbol name for display; "unknown" when the probe was set by address. */
static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}
224
225 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
226 {
227         int ret = -EINVAL;
228
229         if (ff->func == fetch_register) {
230                 const char *name;
231                 name = regs_query_register_name((unsigned int)((long)ff->data));
232                 ret = snprintf(buf, n, "%%%s", name);
233         } else if (ff->func == fetch_stack)
234                 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
235         else if (ff->func == fetch_memory)
236                 ret = snprintf(buf, n, "@0x%p", ff->data);
237         else if (ff->func == fetch_symbol) {
238                 struct symbol_cache *sc = ff->data;
239                 if (sc->offset)
240                         ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
241                                         sc->offset);
242                 else
243                         ret = snprintf(buf, n, "@%s", sc->symbol);
244         } else if (ff->func == fetch_retvalue)
245                 ret = snprintf(buf, n, "$retval");
246         else if (ff->func == fetch_stack_address)
247                 ret = snprintf(buf, n, "$stack");
248         else if (ff->func == fetch_indirect) {
249                 struct indirect_fetch_data *id = ff->data;
250                 size_t l = 0;
251                 ret = snprintf(buf, n, "%+ld(", id->offset);
252                 if (ret >= n)
253                         goto end;
254                 l += ret;
255                 ret = probe_arg_string(buf + l, n - l, &id->orig);
256                 if (ret < 0)
257                         goto end;
258                 l += ret;
259                 ret = snprintf(buf + l, n - l, ")");
260                 ret += l;
261         }
262 end:
263         if (ret >= n)
264                 return -ENOSPC;
265         return ret;
266 }
267
268 static int register_probe_event(struct trace_probe *tp);
269 static void unregister_probe_event(struct trace_probe *tp);
270
271 static DEFINE_MUTEX(probe_lock);
272 static LIST_HEAD(probe_list);
273
274 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
275 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
276                                 struct pt_regs *regs);
277
/*
 * Check that an event/group name is usable: it must look like a C
 * identifier, i.e. [A-Za-z_] followed by any mix of [A-Za-z0-9_].
 * Returns 1 when the name is acceptable, 0 otherwise (including the
 * empty string).
 */
static int check_event_name(const char *name)
{
	const char *p = name;

	if (!isalpha(*p) && *p != '_')
		return 0;

	for (p++; *p != '\0'; p++) {
		if (!isalpha(*p) && !isdigit(*p) && *p != '_')
			return 0;
	}

	return 1;
}
289
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Either @symbol (+@offs) or @addr identifies the probe point; @is_return
 * selects kretprobe vs kprobe dispatch.  @event and @group must pass
 * check_event_name().  Space for @nargs probe_arg slots is reserved but
 * the args themselves are filled in by the caller.
 *
 * Returns the new probe or an ERR_PTR (-ENOMEM / -EINVAL).  On error all
 * partially-allocated strings are freed here.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		/* probe by symbol name (+ offset) */
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* installing rp.handler is what makes probe_is_return() true */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !check_event_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !check_event_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	/* kfree(NULL) is a no-op, so unset members are safe to free */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}
347
/* Release one probe_arg: its fetch-specific payload plus the alias name. */
static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}
356
357 static void free_trace_probe(struct trace_probe *tp)
358 {
359         int i;
360
361         for (i = 0; i < tp->nr_args; i++)
362                 free_probe_arg(&tp->args[i]);
363
364         kfree(tp->call.system);
365         kfree(tp->call.name);
366         kfree(tp->symbol);
367         kfree(tp);
368 }
369
370 static struct trace_probe *find_probe_event(const char *event,
371                                             const char *group)
372 {
373         struct trace_probe *tp;
374
375         list_for_each_entry(tp, &probe_list, list)
376                 if (strcmp(tp->call.name, event) == 0 &&
377                     strcmp(tp->call.system, group) == 0)
378                         return tp;
379         return NULL;
380 }
381
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
	/* disarm the kprobe/kretprobe first, then drop it from the list */
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}
392
393 /* Register a trace_probe and probe_event */
394 static int register_trace_probe(struct trace_probe *tp)
395 {
396         struct trace_probe *old_tp;
397         int ret;
398
399         mutex_lock(&probe_lock);
400
401         /* register as an event */
402         old_tp = find_probe_event(tp->call.name, tp->call.system);
403         if (old_tp) {
404                 /* delete old event */
405                 unregister_trace_probe(old_tp);
406                 free_trace_probe(old_tp);
407         }
408         ret = register_probe_event(tp);
409         if (ret) {
410                 pr_warning("Faild to register probe event(%d)\n", ret);
411                 goto end;
412         }
413
414         tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
415         if (probe_is_return(tp))
416                 ret = register_kretprobe(&tp->rp);
417         else
418                 ret = register_kprobe(&tp->rp.kp);
419
420         if (ret) {
421                 pr_warning("Could not insert probe(%d)\n", ret);
422                 if (ret == -EILSEQ) {
423                         pr_warning("Probing address(0x%p) is not an "
424                                    "instruction boundary.\n",
425                                    tp->rp.kp.addr);
426                         ret = -EINVAL;
427                 }
428                 unregister_probe_event(tp);
429         } else
430                 list_add_tail(&tp->list, &probe_list);
431 end:
432         mutex_unlock(&probe_lock);
433         return ret;
434 }
435
/* Split symbol and offset. */
/*
 * Split "SYM+OFFS" in place: the '+' is replaced with NUL so @symbol
 * becomes just "SYM", and the numeric part is returned through @offset
 * (0 when no '+' is present).  Returns 0 on success or a negative errno
 * when the offset fails to parse.
 */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}
456
#define PARAM_MAX_ARGS 16
/* Deepest $stackN index accepted: one full thread stack of longs */
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

/*
 * Parse the "$..." fetch variables: $retval (kretprobe only), $stack
 * (stack pointer), and $stackN (Nth stack slot, N <= PARAM_MAX_STACK).
 * @arg points just past the '$'.  Fills in *ff and returns 0, or
 * -EINVAL on anything unrecognized.
 */
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;	/* $retval is meaningless on entry probes */
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			/* bare "$stack": the stack address itself */
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else
		ret = -EINVAL;
	return ret;
}
489
/* Recursive argument parser */
/*
 * Parse one fetch argument into *ff.  Dispatch on the first character:
 *   '$'  -> probe variables ($retval/$stack/$stackN)
 *   '%'  -> named register
 *   '@'  -> absolute address (@0x...) or data symbol (@SYM[+|-offs])
 *   '+'/'-' -> indirect memory "+|-offs(ARG)", recursing on ARG
 * The input string is modified in place (separators overwritten with
 * NUL).  Returns 0 on success or a negative errno; on success ff->data
 * may own heap memory (symbol_cache / indirect_fetch_data) which
 * free_probe_arg() releases.
 */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			/* register offset is carried in the data pointer */
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		/* parse past the sign; strict_strtol rejects a leading '+' */
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		/* strrchr: match the OUTERMOST closing paren for nesting */
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = __parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}
567
568 /* String length checking wrapper */
569 static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
570 {
571         if (strlen(arg) > MAX_ARGSTR_LEN) {
572                 pr_info("Argument is too long.: %s\n",  arg);
573                 return -ENOSPC;
574         }
575         return __parse_probe_arg(arg, ff, is_return);
576 }
577
578 /* Return 1 if name is reserved or already used by another argument */
579 static int conflict_field_name(const char *name,
580                                struct probe_arg *args, int narg)
581 {
582         int i;
583         for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
584                 if (strcmp(reserved_field_names[i], name) == 0)
585                         return 1;
586         for (i = 0; i < narg; i++)
587                 if (strcmp(args[i].name, name) == 0)
588                         return 1;
589         return 0;
590 }
591
592 static int create_trace_probe(int argc, char **argv)
593 {
594         /*
595          * Argument syntax:
596          *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
597          *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
598          * Fetch args:
599          *  $retval     : fetch return value
600          *  $stack      : fetch stack address
601          *  $stackN     : fetch Nth of stack (N:0-)
602          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
603          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
604          *  %REG        : fetch register REG
605          * Indirect memory fetch:
606          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
607          * Alias name of args:
608          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
609          */
610         struct trace_probe *tp;
611         int i, ret = 0;
612         int is_return = 0, is_delete = 0;
613         char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
614         unsigned long offset = 0;
615         void *addr = NULL;
616         char buf[MAX_EVENT_NAME_LEN];
617
618         /* argc must be >= 1 */
619         if (argv[0][0] == 'p')
620                 is_return = 0;
621         else if (argv[0][0] == 'r')
622                 is_return = 1;
623         else if (argv[0][0] == '-')
624                 is_delete = 1;
625         else {
626                 pr_info("Probe definition must be started with 'p', 'r' or"
627                         " '-'.\n");
628                 return -EINVAL;
629         }
630
631         if (argv[0][1] == ':') {
632                 event = &argv[0][2];
633                 if (strchr(event, '/')) {
634                         group = event;
635                         event = strchr(group, '/') + 1;
636                         event[-1] = '\0';
637                         if (strlen(group) == 0) {
638                                 pr_info("Group name is not specifiled\n");
639                                 return -EINVAL;
640                         }
641                 }
642                 if (strlen(event) == 0) {
643                         pr_info("Event name is not specifiled\n");
644                         return -EINVAL;
645                 }
646         }
647         if (!group)
648                 group = KPROBE_EVENT_SYSTEM;
649
650         if (is_delete) {
651                 if (!event) {
652                         pr_info("Delete command needs an event name.\n");
653                         return -EINVAL;
654                 }
655                 tp = find_probe_event(event, group);
656                 if (!tp) {
657                         pr_info("Event %s/%s doesn't exist.\n", group, event);
658                         return -ENOENT;
659                 }
660                 /* delete an event */
661                 unregister_trace_probe(tp);
662                 free_trace_probe(tp);
663                 return 0;
664         }
665
666         if (argc < 2) {
667                 pr_info("Probe point is not specified.\n");
668                 return -EINVAL;
669         }
670         if (isdigit(argv[1][0])) {
671                 if (is_return) {
672                         pr_info("Return probe point must be a symbol.\n");
673                         return -EINVAL;
674                 }
675                 /* an address specified */
676                 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
677                 if (ret) {
678                         pr_info("Failed to parse address.\n");
679                         return ret;
680                 }
681         } else {
682                 /* a symbol specified */
683                 symbol = argv[1];
684                 /* TODO: support .init module functions */
685                 ret = split_symbol_offset(symbol, &offset);
686                 if (ret) {
687                         pr_info("Failed to parse symbol.\n");
688                         return ret;
689                 }
690                 if (offset && is_return) {
691                         pr_info("Return probe must be used without offset.\n");
692                         return -EINVAL;
693                 }
694         }
695         argc -= 2; argv += 2;
696
697         /* setup a probe */
698         if (!event) {
699                 /* Make a new event name */
700                 if (symbol)
701                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
702                                  is_return ? 'r' : 'p', symbol, offset);
703                 else
704                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
705                                  is_return ? 'r' : 'p', addr);
706                 event = buf;
707         }
708         tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
709                                is_return);
710         if (IS_ERR(tp)) {
711                 pr_info("Failed to allocate trace_probe.(%d)\n",
712                         (int)PTR_ERR(tp));
713                 return PTR_ERR(tp);
714         }
715
716         /* parse arguments */
717         ret = 0;
718         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
719                 /* Parse argument name */
720                 arg = strchr(argv[i], '=');
721                 if (arg)
722                         *arg++ = '\0';
723                 else
724                         arg = argv[i];
725
726                 if (conflict_field_name(argv[i], tp->args, i)) {
727                         pr_info("Argument%d name '%s' conflicts with "
728                                 "another field.\n", i, argv[i]);
729                         ret = -EINVAL;
730                         goto error;
731                 }
732
733                 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
734                 if (!tp->args[i].name) {
735                         pr_info("Failed to allocate argument%d name '%s'.\n",
736                                 i, argv[i]);
737                         ret = -ENOMEM;
738                         goto error;
739                 }
740
741                 /* Parse fetch argument */
742                 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
743                 if (ret) {
744                         pr_info("Parse error at argument%d. (%d)\n", i, ret);
745                         kfree(tp->args[i].name);
746                         goto error;
747                 }
748
749                 tp->nr_args++;
750         }
751
752         ret = register_trace_probe(tp);
753         if (ret)
754                 goto error;
755         return 0;
756
757 error:
758         free_trace_probe(tp);
759         return ret;
760 }
761
762 static void cleanup_all_probes(void)
763 {
764         struct trace_probe *tp;
765
766         mutex_lock(&probe_lock);
767         /* TODO: Use batch unregistration */
768         while (!list_empty(&probe_list)) {
769                 tp = list_entry(probe_list.next, struct trace_probe, list);
770                 unregister_trace_probe(tp);
771                 free_trace_probe(tp);
772         }
773         mutex_unlock(&probe_lock);
774 }
775
776
/* Probes listing interfaces */

/* seq_file start: take probe_lock (released in probes_seq_stop). */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}
783
/* seq_file next: advance to the next probe_list entry. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}
788
/* seq_file stop: drop the lock taken in probes_seq_start. */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}
793
/*
 * Print one probe definition in the same syntax create_trace_probe()
 * accepts: "p|r:GROUP/EVENT SYMBOL[+offs]|ADDR [NAME=ARG ...]".
 * Arguments are decoded back to text by probe_arg_string().
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];	/* +1: probe_arg_string is passed MAX_ARGSTR_LEN */

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}
821
/* seq_file operations for the probe-listing view of kprobe_events. */
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
828
/*
 * Open kprobe_events.  Opening for write with O_TRUNC (e.g.
 * "echo > kprobe_events") clears every existing probe first.
 */
static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
837
838 static int command_trace_probe(const char *buf)
839 {
840         char **argv;
841         int argc = 0, ret = 0;
842
843         argv = argv_split(GFP_KERNEL, buf, &argc);
844         if (!argv)
845                 return -ENOMEM;
846
847         if (argc)
848                 ret = create_trace_probe(argc, argv);
849
850         argv_free(argv);
851         return ret;
852 }
853
#define WRITE_BUFSIZE 128

/*
 * Write handler for kprobe_events.  The user buffer is consumed in
 * chunks of at most WRITE_BUFSIZE-1 bytes; each newline-terminated line
 * must fit in one chunk and is executed as a single probe command.
 * Text after '#' on a line is discarded as a comment.  Returns the
 * number of bytes consumed, or a negative errno from the first failing
 * command.
 */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			/* consume exactly one line, including the newline */
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			/* a full chunk with no newline: the line can't fit */
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
903
/* debugfs "kprobe_events": list (read), define (write), clear (O_TRUNC). */
static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
912
913 /* Probes profiling interfaces */
914 static int probes_profile_seq_show(struct seq_file *m, void *v)
915 {
916         struct trace_probe *tp = v;
917
918         seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
919                    tp->rp.kp.nmissed);
920
921         return 0;
922 }
923
/*
 * seq_file operations for the kprobe_profile file; iteration callbacks
 * are shared with the kprobe_events file, only the show routine differs.
 */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};
930
/* Open handler for the kprobe_profile file: attach the profile seq ops. */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
935
/* file_operations for the read-only debugfs 'kprobe_profile' file */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
943
/* Kprobe handler */
/*
 * kprobe_trace_func - record a kprobe hit into the ftrace ring buffer
 * @kp:   the kprobe that fired (embedded in its trace_probe)
 * @regs: CPU registers at the probe point, used by the fetch functions
 *
 * Reserves a variable-sized entry (ip plus nr_args fetched values),
 * fills it in and commits it unless the event filter discards it.
 * Always returns 0 so the probed instruction executes unmodified.
 */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* entry size depends on how many arguments this probe fetches */
	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;	/* buffer unavailable: drop the event */

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
977
/* Kretprobe handler */
/*
 * kretprobe_trace_func - record a function-return hit into the ring buffer
 * @ri:   kretprobe instance carrying the saved return address
 * @regs: CPU registers at the return point
 *
 * Like kprobe_trace_func() but records both the probed function address
 * and the return address.  Always returns 0.
 */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* entry size depends on how many arguments this probe fetches */
	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;	/* buffer unavailable: drop the event */

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}
1012
/* Event entry printers */
/*
 * print_kprobe_event - format one entry-probe record for the trace file
 *
 * Produces "<event>: (<symbol+offset>) name=value ..." into the
 * iterator's trace_seq.  Returns TRACE_TYPE_PARTIAL_LINE if the seq
 * buffer fills mid-line so the core can retry with more room.
 */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	/* the trace_event is embedded in the owning trace_probe */
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
1048
/*
 * print_kretprobe_event - format one return-probe record for the trace file
 *
 * Produces "<event>: (<ret_ip> <- <func>) name=value ..." into the
 * iterator's trace_seq.  Returns TRACE_TYPE_PARTIAL_LINE on seq
 * buffer overflow so the core can retry with more room.
 */
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	/* the trace_event is embedded in the owning trace_probe */
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* print the probed function without an offset */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
1089
1090 static int probe_event_enable(struct ftrace_event_call *call)
1091 {
1092         struct trace_probe *tp = (struct trace_probe *)call->data;
1093
1094         tp->flags |= TP_FLAG_TRACE;
1095         if (probe_is_return(tp))
1096                 return enable_kretprobe(&tp->rp);
1097         else
1098                 return enable_kprobe(&tp->rp.kp);
1099 }
1100
1101 static void probe_event_disable(struct ftrace_event_call *call)
1102 {
1103         struct trace_probe *tp = (struct trace_probe *)call->data;
1104
1105         tp->flags &= ~TP_FLAG_TRACE;
1106         if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1107                 if (probe_is_return(tp))
1108                         disable_kretprobe(&tp->rp);
1109                 else
1110                         disable_kprobe(&tp->rp.kp);
1111         }
1112 }
1113
/* Event registration callback: initialize the (empty) field list. */
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}
1120
#undef DEFINE_FIELD
/*
 * DEFINE_FIELD - register one record field with the event filter core.
 * Expects 'event_call', an int 'ret' and an entry struct 'field' to be
 * in scope at the expansion site; returns from the enclosing function
 * on error.
 */
#define DEFINE_FIELD(type, item, name, is_signed)                       \
	do {                                                            \
		ret = trace_define_field(event_call, #type, name,       \
					 offsetof(typeof(field), item), \
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);                 \
		if (ret)                                                \
			return ret;                                     \
	} while (0)
1131
/* Describe the fields of an entry-probe record to the filter core. */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;	/* only for offsetof/sizeof */
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
1145
/* Describe the fields of a return-probe record to the filter core. */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;	/* only for offsetof/sizeof */
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
1160
/*
 * __probe_event_show_format - emit the "print fmt:" line of a format file
 * @s:   output trace_seq
 * @tp:  probe whose argument names are appended as "name=%lx"
 * @fmt: leading format fragment for the address part, e.g. "(%lx)"
 * @arg: leading REC-> field list matching @fmt
 *
 * Returns non-zero on success, 0 if the trace_seq overflowed.
 */
static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
1184
#undef SHOW_FIELD
/*
 * SHOW_FIELD - print one "field:..." description line into the format
 * file.  Expects a trace_seq 's', an int 'ret' and an entry struct
 * 'field' in scope; returns 0 from the enclosing function on overflow.
 */
#define SHOW_FIELD(type, item, name)                                    \
	do {                                                            \
		ret = trace_seq_printf(s, "\tfield:" #type " %s;\t"     \
				"offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type),             \
				is_signed_type(type));                  \
		if (!ret)                                               \
			return 0;                                       \
	} while (0)
1196
/* Emit the debugfs 'format' file contents for an entry probe. */
static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}
1215
/* Emit the debugfs 'format' file contents for a return probe. */
static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}
1236
1237 #ifdef CONFIG_PERF_EVENTS
1238
/* Kprobe profile handler */
/*
 * kprobe_profile_func - feed a kprobe hit to perf
 * @kp:   the kprobe that fired
 * @regs: CPU registers at the probe point
 *
 * Builds a kprobe_trace_entry in the per-cpu perf trace buffer and
 * submits it via perf_tp_event().  Runs with IRQs disabled to protect
 * the per-cpu buffer and the RCU read side.  Always returns 0.
 */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	/* round the record (plus perf's u32 size word) up to u64 alignment */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	/* NMI context uses a separate buffer to avoid corruption */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
1303
/* Kretprobe profile handler */
/*
 * kretprobe_profile_func - feed a function-return hit to perf
 * @ri:   kretprobe instance carrying the saved return address
 * @regs: CPU registers at the return point
 *
 * Mirror of kprobe_profile_func() for return probes: records the
 * probed function address and return address in addition to the
 * fetched arguments.  Always returns 0.
 */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	/* round the record (plus perf's u32 size word) up to u64 alignment */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	/* NMI context uses a separate buffer to avoid corruption */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
1369
1370 static int probe_profile_enable(struct ftrace_event_call *call)
1371 {
1372         struct trace_probe *tp = (struct trace_probe *)call->data;
1373
1374         tp->flags |= TP_FLAG_PROFILE;
1375
1376         if (probe_is_return(tp))
1377                 return enable_kretprobe(&tp->rp);
1378         else
1379                 return enable_kprobe(&tp->rp.kp);
1380 }
1381
1382 static void probe_profile_disable(struct ftrace_event_call *call)
1383 {
1384         struct trace_probe *tp = (struct trace_probe *)call->data;
1385
1386         tp->flags &= ~TP_FLAG_PROFILE;
1387
1388         if (!(tp->flags & TP_FLAG_TRACE)) {
1389                 if (probe_is_return(tp))
1390                         disable_kretprobe(&tp->rp);
1391                 else
1392                         disable_kprobe(&tp->rp.kp);
1393         }
1394 }
1395 #endif  /* CONFIG_PERF_EVENTS */
1396
1397
/* Dispatch a kprobe hit to the trace and/or perf handlers, as enabled. */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1411
/* Dispatch a kretprobe hit to the trace and/or perf handlers, as enabled. */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1425
/*
 * register_probe_event - hook a trace_probe up to the ftrace event core
 * @tp: the probe to register
 *
 * Installs the printer, format and field callbacks (entry vs. return
 * probes use different ones), registers the output event and then adds
 * the event call.  Returns 0 on success or a negative errno; on
 * trace_add_event_call() failure the output event is unregistered again.
 */
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_PERF_EVENTS
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}
1463
/* Detach a trace_probe's event from ftrace. */
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
1469
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;	/* tracing debugfs dir unavailable; not fatal */

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
1497 fs_initcall(init_kprobe_trace);
1498
1499
1500 #ifdef CONFIG_FTRACE_STARTUP_TEST
1501
/*
 * Probe target used by the startup self test below; combines all six
 * arguments so none of them can be optimized away.
 */
static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	int sum = a1;

	sum += a2;
	sum += a3;
	sum += a4;
	sum += a5;
	sum += a6;
	return sum;
}
1507
1508 static __init int kprobe_trace_self_tests_init(void)
1509 {
1510         int ret;
1511         int (*target)(int, int, int, int, int, int);
1512
1513         target = kprobe_trace_selftest_target;
1514
1515         pr_info("Testing kprobe tracing: ");
1516
1517         ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1518                                   "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
1519         if (WARN_ON_ONCE(ret))
1520                 pr_warning("error enabling function entry\n");
1521
1522         ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1523                                   "$retval");
1524         if (WARN_ON_ONCE(ret))
1525                 pr_warning("error enabling function return\n");
1526
1527         ret = target(1, 2, 3, 4, 5, 6);
1528
1529         cleanup_all_probes();
1530
1531         pr_cont("OK\n");
1532         return 0;
1533 }
1534
1535 late_initcall(kprobe_trace_self_tests_init);
1536
1537 #endif