tracing/kprobes: Add $ prefix to special variables
[safe/jmp/linux-2.6] / kernel / trace / trace_kprobe.c
1 /*
2  * kprobe based kernel tracer
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 #include <linux/kprobes.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/debugfs.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/ctype.h>
30 #include <linux/ptrace.h>
31 #include <linux/perf_event.h>
32
33 #include "trace.h"
34 #include "trace_output.h"
35
/* Parser/formatter limits and the default event group name */
#define MAX_TRACE_ARGS 128		/* max number of fetch args per probe */
#define MAX_ARGSTR_LEN 63		/* max textual length of one fetch arg */
#define MAX_EVENT_NAME_LEN 64		/* buffer size for generated event names */
#define KPROBE_EVENT_SYSTEM "kprobes"	/* default event group (subsystem) */
40
41 /* currently, trace_kprobe only supports X86. */
42
/*
 * One pre-parsed fetch operation: a handler plus its private data,
 * invoked at probe-hit time to retrieve a single value.
 */
struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;	/* handler-specific: reg offset, address, index, ... */
};
47
/* Invoke @f's handler against @regs and return the fetched value. */
static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}
53
/* fetch handlers */

/* Fetch the register whose pt_regs byte offset is encoded in @offset. */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}
60
/* Fetch the Nth word of the kernel stack; N is encoded in @num ("$sN"). */
static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}
67
/* Fetch one word from kernel memory at @addr; returns 0 on fault. */
static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	/* probe_kernel_address() fails safely on unmapped addresses */
	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}
76
/* Fetch the Nth function argument ("$aN"); N is encoded in @num. */
static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}
81
/* Fetch the function return value ("$rv"; only valid for kretprobes). */
static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}
87
/*
 * Fetch the instruction pointer.  Used for "$ra": inside a kretprobe
 * handler the IP is the probed function's return address.
 */
static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy)
{
	return instruction_pointer(regs);
}
92
/* Fetch the current kernel stack pointer ("$sa"). */
static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
98
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;		/* symbol name (kstrdup'd, owned by us) */
	long offset;		/* offset added to the resolved address */
	unsigned long addr;	/* cached address+offset; 0 if unresolved */
};
105
106 static unsigned long update_symbol_cache(struct symbol_cache *sc)
107 {
108         sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
109         if (sc->addr)
110                 sc->addr += sc->offset;
111         return sc->addr;
112 }
113
/* Release a symbol_cache and the symbol string it owns. */
static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}
119
/*
 * Allocate a symbol_cache for @sym(+@offset) and resolve it once.
 * Returns NULL on empty name or allocation failure.  A resolution
 * failure is not an error here: sc->addr just stays 0.
 */
static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}
140
/* Fetch memory at a symbol's cached address; returns 0 if unresolved. */
static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}
150
/* Special indirect memory access interface: "+|-offs(ARG)" */
struct indirect_fetch_data {
	struct fetch_func orig;	/* inner fetch producing the base address */
	long offset;		/* displacement applied before dereference */
};
156
/* Dereference (inner fetch result + offset); returns 0 for a NULL base. */
static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}
169
/*
 * Free an indirect_fetch_data, recursing through nested indirect
 * fetches and releasing any symbol_cache held by the innermost fetch.
 */
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
178
179 /**
180  * Kprobe tracer core functions
181  */
182
/* One named probe argument: how to fetch it plus its display name. */
struct probe_arg {
	struct fetch_func	fetch;
	const char		*name;	/* kstrdup'd alias, owned by us */
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2
191
/* One registered probe event, linked on the global probe_list. */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;	/* number of hits (trace handler) */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event		event;
	unsigned int		nr_args;	/* valid entries in args[] */
	struct probe_arg	args[];		/* flexible array, nr_args long */
};

/* Allocation size for a trace_probe carrying @n arguments. */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
207
/* True if @tp is a kretprobe (a return handler was installed). */
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}
212
213 static __kprobes const char *probe_symbol(struct trace_probe *tp)
214 {
215         return tp->symbol ? tp->symbol : "unknown";
216 }
217
218 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
219 {
220         int ret = -EINVAL;
221
222         if (ff->func == fetch_argument)
223                 ret = snprintf(buf, n, "$a%lu", (unsigned long)ff->data);
224         else if (ff->func == fetch_register) {
225                 const char *name;
226                 name = regs_query_register_name((unsigned int)((long)ff->data));
227                 ret = snprintf(buf, n, "%%%s", name);
228         } else if (ff->func == fetch_stack)
229                 ret = snprintf(buf, n, "$s%lu", (unsigned long)ff->data);
230         else if (ff->func == fetch_memory)
231                 ret = snprintf(buf, n, "@0x%p", ff->data);
232         else if (ff->func == fetch_symbol) {
233                 struct symbol_cache *sc = ff->data;
234                 ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
235         } else if (ff->func == fetch_retvalue)
236                 ret = snprintf(buf, n, "$rv");
237         else if (ff->func == fetch_ip)
238                 ret = snprintf(buf, n, "$ra");
239         else if (ff->func == fetch_stack_address)
240                 ret = snprintf(buf, n, "$sa");
241         else if (ff->func == fetch_indirect) {
242                 struct indirect_fetch_data *id = ff->data;
243                 size_t l = 0;
244                 ret = snprintf(buf, n, "%+ld(", id->offset);
245                 if (ret >= n)
246                         goto end;
247                 l += ret;
248                 ret = probe_arg_string(buf + l, n - l, &id->orig);
249                 if (ret < 0)
250                         goto end;
251                 l += ret;
252                 ret = snprintf(buf + l, n - l, ")");
253                 ret += l;
254         }
255 end:
256         if (ret >= n)
257                 return -ENOSPC;
258         return ret;
259 }
260
261 static int register_probe_event(struct trace_probe *tp);
262 static void unregister_probe_event(struct trace_probe *tp);
263
264 static DEFINE_MUTEX(probe_lock);
265 static LIST_HEAD(probe_list);
266
267 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
268 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
269                                 struct pt_regs *regs);
270
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * @group/@event name the ftrace event (both mandatory); either @addr or
 * @symbol(+@offs) names the probe point; @nargs sizes the trailing
 * args[] array; @is_return selects kretprobe vs kprobe dispatch.
 * Returns ERR_PTR(-ENOMEM) on any failure, including missing names.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* probe_is_return() keys off rp.handler being non-NULL */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	/* call.system is NULL on every path reaching here */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}
321
/* Release one probe argument: its fetch-private data and its name. */
static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}
330
/*
 * Free @tp and everything it owns.  Only the first nr_args entries of
 * args[] are released, so nr_args must be accurate before calling.
 */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}
343
/* Look up a probe by event name; caller must hold probe_lock. */
static struct trace_probe *find_probe_event(const char *event)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (!strcmp(tp->call.name, event))
			return tp;
	return NULL;
}
353
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
	/* disarm the kprobe/kretprobe first, then drop the event */
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}
364
365 /* Register a trace_probe and probe_event */
366 static int register_trace_probe(struct trace_probe *tp)
367 {
368         struct trace_probe *old_tp;
369         int ret;
370
371         mutex_lock(&probe_lock);
372
373         /* register as an event */
374         old_tp = find_probe_event(tp->call.name);
375         if (old_tp) {
376                 /* delete old event */
377                 unregister_trace_probe(old_tp);
378                 free_trace_probe(old_tp);
379         }
380         ret = register_probe_event(tp);
381         if (ret) {
382                 pr_warning("Faild to register probe event(%d)\n", ret);
383                 goto end;
384         }
385
386         tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
387         if (probe_is_return(tp))
388                 ret = register_kretprobe(&tp->rp);
389         else
390                 ret = register_kprobe(&tp->rp.kp);
391
392         if (ret) {
393                 pr_warning("Could not insert probe(%d)\n", ret);
394                 if (ret == -EILSEQ) {
395                         pr_warning("Probing address(0x%p) is not an "
396                                    "instruction boundary.\n",
397                                    tp->rp.kp.addr);
398                         ret = -EINVAL;
399                 }
400                 unregister_probe_event(tp);
401         } else
402                 list_add_tail(&tp->list, &probe_list);
403 end:
404         mutex_unlock(&probe_lock);
405         return ret;
406 }
407
/* Split symbol and offset. */
/*
 * Split "SYM+OFFS" in place: terminate @symbol at the '+' and parse the
 * trailing number into *@offset (0 when no '+' is present).  Returns 0
 * on success or a negative error code from the number parser.
 */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}
428
#define PARAM_MAX_ARGS 16	/* highest accepted $aN index */
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

/*
 * Parse a "$..." special variable (@arg points past the '$') and fill
 * in @ff.  $rv/$ra are only valid when @is_return is set.  Returns 0
 * on success or -EINVAL.
 */
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	switch (arg[0]) {
	case 'a':	/* argument: $aN */
		ret = strict_strtoul(arg + 1, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
		break;
	case 'r':	/* retval ($rv) or retaddr ($ra) */
		if (is_return && arg[1] == 'v') {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else if (is_return && arg[1] == 'a') {
			ff->func = fetch_ip;
			ff->data = NULL;
		} else
			ret = -EINVAL;
		break;
	case 's':	/* stack address ($sa) or stack slot ($sN) */
		if (arg[1] == 'a') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else {
			ret = strict_strtoul(arg + 1, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		}
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
476
/*
 * Parse one fetch-argument string into @ff.  Accepted forms:
 *   $VAR, %REG, @ADDR, @SYM[+|-offs], and +|-offs(ARG) (recursive).
 * May mutate @arg in place.  On success the heap data hung off @ff
 * (symbol cache / indirect data) is owned by the caller and released
 * via free_probe_arg().  Returns 0 or a negative error code.
 */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':	/* special variable */
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			/* numeric: a raw kernel address */
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		/* parse magnitude only; sign is applied below */
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			/* recurse for the inner base-address argument */
			ret = parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}
553
554 static int create_trace_probe(int argc, char **argv)
555 {
556         /*
557          * Argument syntax:
558          *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
559          *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
560          * Fetch args:
561          *  $aN : fetch Nth of function argument. (N:0-)
562          *  $rv : fetch return value
563          *  $ra : fetch return address
564          *  $sa : fetch stack address
565          *  $sN : fetch Nth of stack (N:0-)
566          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
567          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
568          *  %REG        : fetch register REG
569          * Indirect memory fetch:
570          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
571          * Alias name of args:
572          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
573          */
574         struct trace_probe *tp;
575         int i, ret = 0;
576         int is_return = 0;
577         char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
578         unsigned long offset = 0;
579         void *addr = NULL;
580         char buf[MAX_EVENT_NAME_LEN];
581
582         if (argc < 2)
583                 return -EINVAL;
584
585         if (argv[0][0] == 'p')
586                 is_return = 0;
587         else if (argv[0][0] == 'r')
588                 is_return = 1;
589         else
590                 return -EINVAL;
591
592         if (argv[0][1] == ':') {
593                 event = &argv[0][2];
594                 if (strchr(event, '/')) {
595                         group = event;
596                         event = strchr(group, '/') + 1;
597                         event[-1] = '\0';
598                         if (strlen(group) == 0) {
599                                 pr_info("Group name is not specifiled\n");
600                                 return -EINVAL;
601                         }
602                 }
603                 if (strlen(event) == 0) {
604                         pr_info("Event name is not specifiled\n");
605                         return -EINVAL;
606                 }
607         }
608
609         if (isdigit(argv[1][0])) {
610                 if (is_return)
611                         return -EINVAL;
612                 /* an address specified */
613                 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
614                 if (ret)
615                         return ret;
616         } else {
617                 /* a symbol specified */
618                 symbol = argv[1];
619                 /* TODO: support .init module functions */
620                 ret = split_symbol_offset(symbol, &offset);
621                 if (ret)
622                         return ret;
623                 if (offset && is_return)
624                         return -EINVAL;
625         }
626         argc -= 2; argv += 2;
627
628         /* setup a probe */
629         if (!group)
630                 group = KPROBE_EVENT_SYSTEM;
631         if (!event) {
632                 /* Make a new event name */
633                 if (symbol)
634                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
635                                  is_return ? 'r' : 'p', symbol, offset);
636                 else
637                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
638                                  is_return ? 'r' : 'p', addr);
639                 event = buf;
640         }
641         tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
642                                is_return);
643         if (IS_ERR(tp))
644                 return PTR_ERR(tp);
645
646         /* parse arguments */
647         ret = 0;
648         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
649                 /* Parse argument name */
650                 arg = strchr(argv[i], '=');
651                 if (arg)
652                         *arg++ = '\0';
653                 else
654                         arg = argv[i];
655                 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
656
657                 /* Parse fetch argument */
658                 if (strlen(arg) > MAX_ARGSTR_LEN) {
659                         pr_info("Argument%d(%s) is too long.\n", i, arg);
660                         ret = -ENOSPC;
661                         goto error;
662                 }
663                 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
664                 if (ret)
665                         goto error;
666         }
667         tp->nr_args = i;
668
669         ret = register_trace_probe(tp);
670         if (ret)
671                 goto error;
672         return 0;
673
674 error:
675         free_trace_probe(tp);
676         return ret;
677 }
678
/* Unregister and free every probe; used when the file is O_TRUNC'd. */
static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}
692
693
/* Probes listing interfaces */

/* seq_file start: holds probe_lock for the whole traversal. */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}
700
/* seq_file next: advance to the following probe_list entry. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}
705
/* seq_file stop: releases the lock taken in probes_seq_start(). */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}
710
/*
 * Show one probe definition, in the same syntax accepted on write:
 * "p|r:EVENT SYM+OFF|ADDR NAME=FETCHARG ...".
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s", tp->call.name);

	if (tp->symbol)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " 0x%p", tp->rp.kp.addr);

	for (i = 0; i < tp->nr_args; i++) {
		/* re-render each fetch arg from its parsed form */
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}
736
/* seq_file operations for the probe-listing view of kprobe_events. */
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
743
/* Open kprobe_events; O_TRUNC for writing clears all existing probes. */
static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
752
753 static int command_trace_probe(const char *buf)
754 {
755         char **argv;
756         int argc = 0, ret = 0;
757
758         argv = argv_split(GFP_KERNEL, buf, &argc);
759         if (!argv)
760                 return -ENOMEM;
761
762         if (argc)
763                 ret = create_trace_probe(argc, argv);
764
765         argv_free(argv);
766         return ret;
767 }
768
#define WRITE_BUFSIZE 128	/* max accepted command-line length */

/*
 * Handle writes to kprobe_events: process newline-separated commands,
 * stripping '#' comments.  Stops at the first failing command and
 * returns its error; otherwise returns the number of bytes consumed.
 */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			/* a full buffer with no newline: line too long */
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
818
/* File operations for the debugfs kprobe_events control file. */
static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
827
/* Probes profiling interfaces */

/* Show one profile row: event name, hit count, missed count. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
838
/* seq_file operations for the hit/miss statistics view. */
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};
845
/* Open the read-only kprobe_profile statistics file. */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
850
/* File operations for the debugfs kprobe_profile file (read-only). */
static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
858
/* Kprobe handler */
/*
 * Probe-hit handler: bump the hit counter and record one
 * kprobe_trace_entry (probe ip + fetched argument values) into the
 * ftrace ring buffer.  Always returns 0.
 */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;	/* buffer full or tracing off: drop silently */

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	/* evaluate each pre-parsed fetch against the trapped registers */
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
892
/* Kretprobe handler */
/*
 * Return-probe handler: record one kretprobe_trace_entry (function
 * address, return address, fetched args) into the ftrace ring buffer.
 * Always returns 0.
 */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;	/* buffer full or tracing off: drop silently */

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	/* regs here reflect the state at function return */
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}
927
/* Event entry printers */
/*
 * Format a kprobe trace entry as "<event>: (<ip+sym>) <arg>=<val>...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up mid-line
 * so the caller can retry with a larger buffer.
 */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	/* Recover the owning trace_probe from the registered event type */
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* One "name=value" pair per recorded argument */
	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
963
/*
 * Format a kretprobe trace entry as
 * "<event>: (<ret_ip+sym> <- <func>) <arg>=<val>...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up mid-line.
 */
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	/* Recover the owning trace_probe from the registered event type */
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	/* Return site is printed with an offset, ... */
	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* ... the probed function without one (it is always an entry point) */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* One "name=value" pair per recorded argument */
	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
1004
1005 static int probe_event_enable(struct ftrace_event_call *call)
1006 {
1007         struct trace_probe *tp = (struct trace_probe *)call->data;
1008
1009         tp->flags |= TP_FLAG_TRACE;
1010         if (probe_is_return(tp))
1011                 return enable_kretprobe(&tp->rp);
1012         else
1013                 return enable_kprobe(&tp->rp.kp);
1014 }
1015
1016 static void probe_event_disable(struct ftrace_event_call *call)
1017 {
1018         struct trace_probe *tp = (struct trace_probe *)call->data;
1019
1020         tp->flags &= ~TP_FLAG_TRACE;
1021         if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1022                 if (probe_is_return(tp))
1023                         disable_kretprobe(&tp->rp);
1024                 else
1025                         disable_kprobe(&tp->rp.kp);
1026         }
1027 }
1028
/* Minimal raw_init callback: just prepare the event's field list */
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}
1035
#undef DEFINE_FIELD
/*
 * Register one field of the trace entry with the event filter core and
 * return from the *enclosing function* on error.  Relies on 'event_call',
 * 'field' and 'ret' being in the caller's scope.
 */
#define DEFINE_FIELD(type, item, name, is_signed)                       \
	do {                                                            \
		ret = trace_define_field(event_call, #type, name,       \
					 offsetof(typeof(field), item), \
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);                 \
		if (ret)                                                \
			return ret;                                     \
	} while (0)
1046
1047 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1048 {
1049         int ret, i;
1050         struct kprobe_trace_entry field;
1051         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1052
1053         ret = trace_define_common_fields(event_call);
1054         if (!ret)
1055                 return ret;
1056
1057         DEFINE_FIELD(unsigned long, ip, "ip", 0);
1058         DEFINE_FIELD(int, nargs, "nargs", 1);
1059         /* Set argument names as fields */
1060         for (i = 0; i < tp->nr_args; i++)
1061                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1062         return 0;
1063 }
1064
1065 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1066 {
1067         int ret, i;
1068         struct kretprobe_trace_entry field;
1069         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1070
1071         ret = trace_define_common_fields(event_call);
1072         if (!ret)
1073                 return ret;
1074
1075         DEFINE_FIELD(unsigned long, func, "func", 0);
1076         DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0);
1077         DEFINE_FIELD(int, nargs, "nargs", 1);
1078         /* Set argument names as fields */
1079         for (i = 0; i < tp->nr_args; i++)
1080                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1081         return 0;
1082 }
1083
/*
 * Emit the 'print fmt: "..."' trailer of an event format file: the
 * caller-supplied prefix format/args plus one " name=%lx"/", REC->name"
 * pair per probe argument.  Returns 0 if the seq buffer overflowed.
 */
static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	/* Matching argument list referencing the record's fields */
	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
1107
#undef SHOW_FIELD
/*
 * Print one "field: <type> <name>; offset:...; size:..." line of the
 * event format file and return from the *enclosing function* if the
 * seq buffer overflows.  Relies on 's', 'field' and 'ret' in scope.
 */
#define SHOW_FIELD(type, item, name)                                    \
	do {                                                            \
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"    \
				"offset:%u;\tsize:%u;\n", name,         \
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));            \
		if (!ret)                                               \
			return 0;                                       \
	} while (0)
1118
/* show_format callback for kprobe events: field list + print fmt */
static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	/* Only used for offsetof/sizeof inside SHOW_FIELD */
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, "ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)", "REC->ip");
}
1136
/* show_format callback for kretprobe events: field list + print fmt */
static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	/* Only used for offsetof/sizeof inside SHOW_FIELD */
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, "func");
	SHOW_FIELD(unsigned long, ret_ip, "ret_ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					  "REC->func, REC->ret_ip");
}
1156
1157 #ifdef CONFIG_EVENT_PROFILE
1158
/* Kprobe profile handler: feed one event into the perf subsystem */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *raw_data;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	/* Round up so that the record plus its u32 header is u64-aligned */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	/* NMI context gets its own buffer to avoid corrupting the normal one */
	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;	/* profiling buffers not allocated */

	raw_data = per_cpu_ptr(raw_data, __cpu);
	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);
end:
	local_irq_restore(irq_flags);
	return 0;
}
1211
/* Kretprobe profile handler: feed one return event into perf */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *raw_data;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	/* Round up so that the record plus its u32 header is u64-aligned */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	/* NMI context gets its own buffer to avoid corrupting the normal one */
	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;	/* profiling buffers not allocated */

	raw_data = per_cpu_ptr(raw_data, __cpu);
	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
end:
	local_irq_restore(irq_flags);
	return 0;
}
1265
1266 static int probe_profile_enable(struct ftrace_event_call *call)
1267 {
1268         struct trace_probe *tp = (struct trace_probe *)call->data;
1269
1270         tp->flags |= TP_FLAG_PROFILE;
1271
1272         if (probe_is_return(tp))
1273                 return enable_kretprobe(&tp->rp);
1274         else
1275                 return enable_kprobe(&tp->rp.kp);
1276 }
1277
1278 static void probe_profile_disable(struct ftrace_event_call *call)
1279 {
1280         struct trace_probe *tp = (struct trace_probe *)call->data;
1281
1282         tp->flags &= ~TP_FLAG_PROFILE;
1283
1284         if (!(tp->flags & TP_FLAG_TRACE)) {
1285                 if (probe_is_return(tp))
1286                         disable_kretprobe(&tp->rp);
1287                 else
1288                         disable_kprobe(&tp->rp.kp);
1289         }
1290 }
1291 #endif  /* CONFIG_EVENT_PROFILE */
1292
1293
/* Fan a probe hit out to whichever consumers (trace/profile) are active */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1307
/* Fan a return-probe hit out to the active consumers (trace/profile) */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1321
/*
 * Wire a trace_probe into the ftrace event subsystem: pick the right
 * print/format/field callbacks for kprobe vs kretprobe, register the
 * event type, and add the event call.  Returns 0 or a negative errno.
 */
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;	/* no free event type IDs */
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		/* Undo register_ftrace_event() on failure */
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}
1360
/* Detach a trace_probe's event from ftrace */
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
1366
/* Make a debugfs interface for controling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;	/* tracing debugfs dir unavailable; nothing to do */

	/* Event definition interface (writable: add/remove probes) */
	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface (read-only hit/miss statistics) */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
1394 fs_initcall(init_kprobe_trace);
1395
1396
1397 #ifdef CONFIG_FTRACE_STARTUP_TEST
1398
/* Dummy function probed by the startup selftest; returns the argument sum */
static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	int sum = a1;

	sum += a2;
	sum += a3;
	sum += a4;
	sum += a5;
	sum += a6;
	return sum;
}
1404
/*
 * Boot-time smoke test: define one kprobe and one kretprobe event on the
 * selftest target, hit them once, then remove all probes.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	/* Call through a pointer so the compiler cannot inline the target */
	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	/*
	 * NOTE(review): these argument specs must match the command
	 * parser's accepted syntax.  If special variables now require a
	 * '$' prefix (per the change adding '$' to special variables),
	 * "a1".."a6" and "ra rv" below would be rejected -- verify
	 * against parse_probe_arg() and update accordingly.
	 */
	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "a1 a2 a3 a4 a5 a6");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "ra rv");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	/* Trigger both probes once */
	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}
1431
1432 late_initcall(kprobe_trace_self_tests_init);
1433
1434 #endif