tracing/events: Add 'signed' field to format files
kernel/trace/trace_syscalls.c
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_id != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_id != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                                trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

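/*
 * SYSCALL_FIELD() expands to the argument list that describes one fixed
 * field of the trace entry: type name, field name, offset, size and
 * signedness, in the order expected by the format printf and by
 * trace_define_field().  If the declared type does not match the size of
 * the struct member, the reference to the undefined __bad_type_size()
 * makes the build fail instead of emitting a bogus format description.
 */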
#define SYSCALL_FIELD(type, name)                                       \
        sizeof(type) != sizeof(trace.name) ?                            \
                __bad_type_size() :                                     \
                #type, #name, offsetof(typeof(trace), name),            \
                sizeof(trace.name), is_signed_type(type)

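/*
 * Write the "format" file contents for a syscall entry event: one
 * "field:<type> <name>; offset:<n>; size:<n>; signed:<0|1>;" line for the
 * syscall nr and for each argument, followed by the print fmt string that
 * userspace parsers use to pretty-print the record.
 */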
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int i;
        int nr;
        int ret;
        struct syscall_metadata *entry;
        struct syscall_trace_enter trace;
        int offset = offsetof(struct syscall_trace_enter, args);

        nr = syscall_name_to_nr(call->data);
        entry = syscall_nr_to_meta(nr);

        if (!entry)
                return 0;

        ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr));
        if (!ret)
                return 0;

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
                                        entry->args[i]);
                if (!ret)
                        return 0;
                ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
                                       "\tsigned:%u;\n", offset,
                                       sizeof(unsigned long),
                                       is_signed_type(unsigned long));
                if (!ret)
                        return 0;
                offset += sizeof(unsigned long);
        }

        trace_seq_puts(s, "\nprint fmt: \"");
        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
                                        sizeof(unsigned long),
                                        i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return 0;
        }
        trace_seq_putc(s, '"');

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
                                       entry->args[i]);
                if (!ret)
                        return 0;
        }

        return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int ret;
        struct syscall_trace_exit trace;

        ret = trace_seq_printf(s,
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n"
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr),
                               SYSCALL_FIELD(long, ret));
        if (!ret)
                return 0;

        return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta;
        int ret;
        int nr;
        int i;
        int offset = offsetof(typeof(trace), args);

        nr = syscall_name_to_nr(call->data);
        meta = syscall_nr_to_meta(nr);

        if (!meta)
                return 0;

        ret = trace_define_common_fields(call);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_common_fields(call);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}

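/*
 * Tracepoint probe for sys_enter: reserve a ring buffer event sized for
 * the syscall number plus its arguments, fill it in, and commit it unless
 * the event filter discards it.
 */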
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
                                                  size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
                                sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

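/*
 * Enable/disable helpers for the syscall trace events.  The sys_enter and
 * sys_exit tracepoint probes are registered only once, when the first
 * syscall event is enabled; individual syscalls are then switched on and
 * off via the enabled_*_syscalls bitmaps, all under syscall_trace_lock.
 */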
int reg_event_syscall_enter(void *ptr)
{
        int ret = 0;
        int num;
        char *name;

        name = (char *)ptr;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                                "syscall entry trace point");
        } else {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(void *ptr)
{
        int num;
        char *name;

        name = (char *)ptr;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(void *ptr)
{
        int ret = 0;
        int num;
        char *name;

        name = (char *)ptr;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                                "syscall exit trace point");
        } else {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(void *ptr)
{
        int num;
        char *name;

        name = (char *)ptr;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
        .trace                  = print_syscall_enter,
};

struct trace_event event_syscall_exit = {
        .trace                  = print_syscall_exit,
};

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

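/*
 * Profile (perf) counterpart of the probes above: instead of writing to
 * the ftrace ring buffer, the record is built in a per-cpu scratch buffer
 * and handed to perf via perf_tp_event().
 */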
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        unsigned long flags;
        char *raw_data;
        int syscall_nr;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        cpu = smp_processor_id();

        if (in_nmi())
                raw_data = rcu_dereference(trace_profile_buf_nmi);
        else
                raw_data = rcu_dereference(trace_profile_buf);

        if (!raw_data)
                goto end;

        raw_data = per_cpu_ptr(raw_data, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_enter *) raw_data;
        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->enter_id;
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                               (unsigned long *)&rec->args);
        perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
        local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
        int ret = 0;
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_enter)
                ret = register_trace_sys_enter(prof_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                                "syscall entry trace point");
        } else {
                set_bit(num, enabled_prof_enter_syscalls);
                sys_prof_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_prof_syscall_enter(char *name)
{
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_enter--;
        clear_bit(num, enabled_prof_enter_syscalls);
        if (!sys_prof_refcount_enter)
                unregister_trace_sys_enter(prof_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}

static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        unsigned long flags;
        int syscall_nr;
        char *raw_data;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                "exit event has grown above profile buffer size"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);
        cpu = smp_processor_id();

        if (in_nmi())
                raw_data = rcu_dereference(trace_profile_buf_nmi);
        else
                raw_data = rcu_dereference(trace_profile_buf);

        if (!raw_data)
                goto end;

        raw_data = per_cpu_ptr(raw_data, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_exit *)raw_data;

        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->exit_id;
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
        local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
        int ret = 0;
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_exit)
                ret = register_trace_sys_exit(prof_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                                "syscall exit trace point");
        } else {
                set_bit(num, enabled_prof_exit_syscalls);
                sys_prof_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_prof_syscall_exit(char *name)
{
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_exit--;
        clear_bit(num, enabled_prof_exit_syscalls);
        if (!sys_prof_refcount_exit)
                unregister_trace_sys_exit(prof_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

#endif
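
For reference, the "signed:" values written by the format callbacks above come from is_signed_type(). The snippet below is a minimal stand-alone sketch of the usual idiom (the macro body is illustrative and may not match the kernel's exact definition): cast -1 to the type; for a signed type the result stays below (type)1, while for an unsigned type it wraps around to the type's maximum value.

#include <stdio.h>

/* Illustrative only: evaluates to 1 for signed types, 0 for unsigned ones. */
#define is_signed_type(type)    (((type)(-1)) < (type)1)

int main(void)
{
        printf("int           -> %d\n", is_signed_type(int));           /* 1 */
        printf("long          -> %d\n", is_signed_type(long));          /* 1 */
        printf("unsigned long -> %d\n", is_signed_type(unsigned long)); /* 0 */
        printf("char          -> %d\n", is_signed_type(char));          /* 1 or 0: plain char signedness is ABI-dependent */
        return 0;
}

Built as an ordinary userspace program, this prints 1 for the signed cases and 0 for unsigned long, which is exactly the value the format file records after each field.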