/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/time.h>

#include <asm/atomic.h>

#include "trace.h"
#include "trace_output.h"

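/*
 * Iterator state for streaming the PCI device header lines through
 * trace_pipe; dev points at the current device in the walk.
 */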
struct header_iter {
        struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;

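/* Reset the overrun bookkeeping and clear the per-CPU trace buffers. */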
static void mmio_reset_data(struct trace_array *tr)
{
        overrun_detected = false;
        prev_overruns = 0;

        tracing_reset_online_cpus(tr);
}

static int mmio_trace_init(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_trace_array = tr;

        mmio_reset_data(tr);
        enable_mmiotrace();
        return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);

        disable_mmiotrace();
        mmio_reset_data(tr);
        mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_reset_data(tr);
}

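/*
 * Emit one "PCIDEV" header line: bus number/devfn, vendor and device
 * IDs, IRQ, then the base address of each of the seven resources 0-6
 * (BARs plus expansion ROM, with flag bits ORed in) followed by their
 * sizes, and finally the bound driver name, if any.
 */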
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
        int ret = 0;
        int i;
        resource_size_t start, end;
        const struct pci_driver *drv = pci_dev_driver(dev);

        /* XXX: incomplete checks for trace_seq_printf() return value */
        ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
                                dev->bus->number, dev->devfn,
                                dev->vendor, dev->device, dev->irq);
        /*
         * XXX: is pci_resource_to_user() appropriate, since we are
         * supposed to interpret the __ioremap() phys_addr argument based on
         * these printed values?
         */
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        (unsigned long long)(start |
                        (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
        }
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        dev->resource[i].start < dev->resource[i].end ?
                        (unsigned long long)(end - start) + 1 : 0);
        }
        if (drv)
                ret += trace_seq_printf(s, " %s\n", drv->name);
        else
                ret += trace_seq_printf(s, " \n");
        return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
        if (!hiter)
                return;
        pci_dev_put(hiter->dev);
        kfree(hiter);
}

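/*
 * Called when trace_pipe is opened: print the VERSION header and
 * start the PCI device walk that mmio_read() continues.
 */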
static void mmio_pipe_open(struct trace_iterator *iter)
{
        struct header_iter *hiter;
        struct trace_seq *s = &iter->seq;

        trace_seq_printf(s, "VERSION 20070824\n");

        hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
        if (!hiter)
                return;

        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
        iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
        struct header_iter *hiter = iter->private;
        destroy_header_iter(hiter);
        iter->private = NULL;
}

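/*
 * Events can be lost in two ways: the ring buffer overwrites old
 * entries (tracked via ring_buffer_overruns()) or an event fails to
 * reserve space at all (tracked in dropped_count). Return how many
 * were lost since the last call and reset the counters.
 */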
static unsigned long count_overruns(struct trace_iterator *iter)
{
        unsigned long cnt = atomic_xchg(&dropped_count, 0);
        unsigned long over = ring_buffer_overruns(iter->tr->buffer);

        if (over > prev_overruns)
                cnt += over - prev_overruns;
        prev_overruns = over;
        return cnt;
}

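/*
 * While the header iterator is live, emit one PCIDEV line per read;
 * returning 0 afterwards hands the read back to the regular
 * trace_pipe event path. A lost-events MARK takes priority over
 * header output.
 */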
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
                                char __user *ubuf, size_t cnt, loff_t *ppos)
{
        ssize_t ret;
        struct header_iter *hiter = iter->private;
        struct trace_seq *s = &iter->seq;
        unsigned long n;

        n = count_overruns(iter);
        if (n) {
                /* XXX: This is later than where events were lost. */
                trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
                if (!overrun_detected)
                        pr_warning("mmiotrace has lost events.\n");
                overrun_detected = true;
                goto print_out;
        }

        if (!hiter)
                return 0;

        mmio_print_pcidev(s, hiter->dev);
        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

        if (!hiter->dev) {
                destroy_header_iter(hiter);
                iter->private = NULL;
        }

print_out:
        ret = trace_seq_to_user(s, ubuf, cnt);
        return (ret == -EBUSY) ? 0 : ret;
}

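/*
 * Format one MMIO access in the mmiotrace text format, e.g. (an
 * illustrative line) "R 4 0.123456 1 0xfe000000 0x0 0xc01234ab 0":
 * opcode, access width, timestamp, map id, physical address, value,
 * PC. The trailing literal 0 is a placeholder field in the record
 * layout.
 */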
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_rw *field;
        struct mmiotrace_rw *rw;
        struct trace_seq *s     = &iter->seq;
        unsigned long long t    = ns2usecs(iter->ts);
        unsigned long usec_rem  = do_div(t, USEC_PER_SEC);
        unsigned secs           = (unsigned)t;
        int ret = 1;

        trace_assign_type(field, entry);
        rw = &field->rw;

        switch (rw->opcode) {
        case MMIO_READ:
                ret = trace_seq_printf(s,
                        "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_WRITE:
                ret = trace_seq_printf(s,
                        "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_UNKNOWN_OP:
                ret = trace_seq_printf(s,
                        "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
                        "%02lx 0x%lx %d\n",
                        secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
                        (rw->value >> 0) & 0xff, rw->pc, 0);
                break;
        default:
                ret = trace_seq_printf(s, "rw what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}

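/* Format a mapping event as a MAP (probe) or UNMAP (unprobe) line. */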
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_map *field;
        struct mmiotrace_map *m;
        struct trace_seq *s     = &iter->seq;
        unsigned long long t    = ns2usecs(iter->ts);
        unsigned long usec_rem  = do_div(t, USEC_PER_SEC);
        unsigned secs           = (unsigned)t;
        int ret;

        trace_assign_type(field, entry);
        m = &field->map;

        switch (m->opcode) {
        case MMIO_PROBE:
                ret = trace_seq_printf(s,
                        "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
                        secs, usec_rem, m->map_id,
                        (unsigned long long)m->phys, m->virt, m->len,
                        0UL, 0);
                break;
        case MMIO_UNPROBE:
                ret = trace_seq_printf(s,
                        "UNMAP %u.%06lu %d 0x%lx %d\n",
                        secs, usec_rem, m->map_id, 0UL, 0);
                break;
        default:
                ret = trace_seq_printf(s, "map what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct print_entry *print = (struct print_entry *)entry;
        const char *msg         = print->buf;
        struct trace_seq *s     = &iter->seq;
        unsigned long long t    = ns2usecs(iter->ts);
        unsigned long usec_rem  = do_div(t, USEC_PER_SEC);
        unsigned secs           = (unsigned)t;
        int ret;

        /* The trailing newline must be in the message. */
        ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
        switch (iter->ent->type) {
        case TRACE_MMIO_RW:
                return mmio_print_rw(iter);
        case TRACE_MMIO_MAP:
                return mmio_print_map(iter);
        case TRACE_PRINT:
                return mmio_print_mark(iter);
        default:
                return TRACE_TYPE_HANDLED; /* ignore unknown entries */
        }
}

static struct tracer mmio_tracer __read_mostly =
{
        .name           = "mmiotrace",
        .init           = mmio_trace_init,
        .reset          = mmio_trace_reset,
        .start          = mmio_trace_start,
        .pipe_open      = mmio_pipe_open,
        .close          = mmio_close,
        .read           = mmio_read,
        .print_line     = mmio_print_line,
};

static int __init init_mmio_trace(void)
{
        return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

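/*
 * Reserve space in the ring buffer for an MMIO access event; if the
 * reservation fails, account the loss in dropped_count so that
 * mmio_read() can report it later.
 */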
static void __trace_mmiotrace_rw(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                struct mmiotrace_rw *rw)
{
        struct ftrace_event_call *call = &event_mmiotrace_rw;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
        entry->rw                       = *rw;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}

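/*
 * Entry point from the kmmio fault handler; runs with preemption
 * already disabled, so smp_processor_id() is safe without explicit
 * pinning here.
 */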
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data = tr->data[smp_processor_id()];
        __trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                struct mmiotrace_map *map)
{
        struct ftrace_event_call *call = &event_mmiotrace_map;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
        entry->map                      = *map;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}

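/*
 * Unlike mmio_trace_rw(), this may be called from preemptible
 * context, so pin the CPU while the per-CPU data is looked up and
 * used.
 */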
void mmio_trace_mapping(struct mmiotrace_map *map)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data;

        preempt_disable();
        data = tr->data[smp_processor_id()];
        __trace_mmiotrace_map(tr, data, map);
        preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
        return trace_vprintk(0, fmt, args);
}