kernel/trace/trace_event_profile.c
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int      total_profile_count;
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->profile_count++ > 0)
                return 0;

        if (!total_profile_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

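        /* Buffers are published; now let the event register its profile handler */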
        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->profile_count--;

        return ret;
}

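/*
 * Look up the event by id under event_mutex, pin its defining module so
 * it cannot unload while profiling is active, and enable profiling on
 * the event.  Returns -EINVAL if no matching profilable event exists.
 */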
int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

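/*
 * Drop one profiling reference on the event.  When the last profiled
 * event in the whole system is disabled, unpublish the per-cpu buffers
 * and free them once every in-flight user has finished.
 */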
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->profile_count > 0)
                return;

        event->profile_disable(event);

        if (!--total_profile_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event still profiling has finished
                 * before releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}

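/*
 * Counterpart of ftrace_profile_enable(): look up the event by id, then
 * drop both the profiling reference and the module reference taken at
 * enable time.
 */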
void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}

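/*
 * Reserve space for one event record in the per-cpu buffer matching the
 * current context (NMI or not), with interrupts disabled and the perf
 * recursion context held.  The caller must pass *rctxp and *irq_flags
 * back when it submits or discards the record.
 */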
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
                                        int *rctxp, unsigned long *irq_flags)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc, cpu;

        pc = preempt_count();

        /* Protect the per-cpu buffer, begin the rcu read side */
        local_irq_save(*irq_flags);

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* Zero the dead bytes from alignment so we don't leak stack to userspace */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
        entry->type = type;

        return raw_data;
err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        local_irq_restore(*irq_flags);
        return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
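
/*
 * Usage sketch (not part of this file): a typical generated
 * perf_trace_<call>() handler pairs the prepare helper above with the
 * ftrace_perf_buf_submit() helper declared alongside it.  The record
 * struct and its field below are hypothetical stand-ins for a generated
 * ftrace_raw_<call> layout.
 *
 *      struct ftrace_raw_sample {
 *              struct trace_entry      ent;
 *              unsigned long           value;
 *      };
 *
 *      static void perf_trace_sample(unsigned long value, int event_id)
 *      {
 *              struct ftrace_raw_sample *entry;
 *              unsigned long irq_flags;
 *              int rctx;
 *              // Keep size u64-aligned so the trailing pad word zeroed
 *              // by ftrace_perf_buf_prepare() stays in bounds.
 *              int size = ALIGN(sizeof(*entry) + sizeof(u64), sizeof(u64));
 *
 *              entry = ftrace_perf_buf_prepare(size, event_id,
 *                                              &rctx, &irq_flags);
 *              if (!entry)
 *                      return;
 *
 *              entry->value = value;
 *              ftrace_perf_buf_submit(entry, size, rctx, 0, 1, irq_flags);
 *      }
 */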