tracing/events: fix output format of user stack
[safe/jmp/linux-2.6] kernel/trace/ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
20 #include <linux/fs.h>
21
22 #include "trace.h"
23
24 /*
25  * The ring buffer header is special. We must keep it up to date by hand.
26  */
27 int ring_buffer_print_entry_header(struct trace_seq *s)
28 {
29         int ret;
30
31         ret = trace_seq_printf(s, "# compressed entry header\n");
32         ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
33         ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
34         ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
35         ret = trace_seq_printf(s, "\n");
36         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
37                                RINGBUF_TYPE_PADDING);
38         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39                                RINGBUF_TYPE_TIME_EXTEND);
40         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
41                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
42
43         return ret;
44 }
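/*
 * For reference, the header text built above comes out roughly as
 * follows (the numeric type values come from the ring_buffer_type enum
 * in <linux/ring_buffer.h>; shown here only as an illustration):
 *
 *	# compressed entry header
 *	type_len    :    5 bits
 *	time_delta  :   27 bits
 *	array       :   32 bits
 *
 *	padding     : type == 29
 *	time_extend : type == 30
 *	data max type_len  == 28
 */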
45
46 /*
47  * The ring buffer is made up of a list of pages. A separate list of pages is
48  * allocated for each CPU. A writer may only write to a buffer that is
49  * associated with the CPU it is currently executing on.  A reader may read
50  * from any per cpu buffer.
51  *
52  * The reader is special. For each per cpu buffer, the reader has its own
53  * reader page. When a reader has read the entire reader page, this reader
54  * page is swapped with another page in the ring buffer.
55  *
56  * Now, as long as the writer is off the reader page, the reader can do
57  * whatever it wants with that page. The writer will never write to that page
58  * again (as long as it is out of the ring buffer).
59  *
60  * Here's some silly ASCII art.
61  *
62  *   +------+
63  *   |reader|          RING BUFFER
64  *   |page  |
65  *   +------+        +---+   +---+   +---+
66  *                   |   |-->|   |-->|   |
67  *                   +---+   +---+   +---+
68  *                     ^               |
69  *                     |               |
70  *                     +---------------+
71  *
72  *
73  *   +------+
74  *   |reader|          RING BUFFER
75  *   |page  |------------------v
76  *   +------+        +---+   +---+   +---+
77  *                   |   |-->|   |-->|   |
78  *                   +---+   +---+   +---+
79  *                     ^               |
80  *                     |               |
81  *                     +---------------+
82  *
83  *
84  *   +------+
85  *   |reader|          RING BUFFER
86  *   |page  |------------------v
87  *   +------+        +---+   +---+   +---+
88  *      ^            |   |-->|   |-->|   |
89  *      |            +---+   +---+   +---+
90  *      |                              |
91  *      |                              |
92  *      +------------------------------+
93  *
94  *
95  *   +------+
96  *   |buffer|          RING BUFFER
97  *   |page  |------------------v
98  *   +------+        +---+   +---+   +---+
99  *      ^            |   |   |   |-->|   |
100  *      |   New      +---+   +---+   +---+
101  *      |  Reader------^               |
102  *      |   page                       |
103  *      +------------------------------+
104  *
105  *
106  * After we make this swap, the reader can hand this page off to the splice
107  * code and be done with it. It can even allocate a new page if it needs to
108  * and swap that into the ring buffer.
109  *
110  * We will be using cmpxchg soon to make all this lockless.
111  *
112  */
113
114 /*
115  * A fast way to enable or disable all ring buffers is to
116  * call tracing_on or tracing_off. Turning off the ring buffers
117  * prevents all ring buffers from being recorded to.
118  * Turning this switch on makes it OK to write to the
119  * ring buffer, if the ring buffer is enabled itself.
120  *
121  * There are three layers that must be on in order to write
122  * to the ring buffer.
123  *
124  * 1) This global flag must be set.
125  * 2) The ring buffer must be enabled for recording.
126  * 3) The per cpu buffer must be enabled for recording.
127  *
128  * In case of an anomaly, this global flag has a bit set that
129  * will permanently disable all ring buffers.
130  */
131
132 /*
133  * Global flag to disable all recording to ring buffers
134  *  This has two bits: ON, DISABLED
135  *
136  *  ON   DISABLED
137  * ---- ----------
138  *   0      0        : ring buffers are off
139  *   1      0        : ring buffers are on
140  *   X      1        : ring buffers are permanently disabled
141  */
142
143 enum {
144         RB_BUFFERS_ON_BIT       = 0,
145         RB_BUFFERS_DISABLED_BIT = 1,
146 };
147
148 enum {
149         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
150         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
151 };
152
153 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154
155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156
157 /**
158  * tracing_on - enable all tracing buffers
159  *
160  * This function enables all tracing buffers that may have been
161  * disabled with tracing_off.
162  */
163 void tracing_on(void)
164 {
165         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166 }
167 EXPORT_SYMBOL_GPL(tracing_on);
168
169 /**
170  * tracing_off - turn off all tracing buffers
171  *
172  * This function stops all tracing buffers from recording data.
173  * It does not disable any overhead the tracers themselves may
174  * be causing. This function simply causes all recording to
175  * the ring buffers to fail.
176  */
177 void tracing_off(void)
178 {
179         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180 }
181 EXPORT_SYMBOL_GPL(tracing_off);
182
183 /**
184  * tracing_off_permanent - permanently disable ring buffers
185  *
186  * This function, once called, will disable all ring buffers
187  * permanently.
188  */
189 void tracing_off_permanent(void)
190 {
191         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192 }
193
194 /**
195  * tracing_is_on - show state of ring buffers enabled
196  */
197 int tracing_is_on(void)
198 {
199         return ring_buffer_flags == RB_BUFFERS_ON;
200 }
201 EXPORT_SYMBOL_GPL(tracing_is_on);
202
204
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT            4U
207 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208
209 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
211
212 enum {
213         RB_LEN_TIME_EXTEND = 8,
214         RB_LEN_TIME_STAMP = 16,
215 };
216
217 static inline int rb_null_event(struct ring_buffer_event *event)
218 {
219         return event->type_len == RINGBUF_TYPE_PADDING
220                         && event->time_delta == 0;
221 }
222
223 static inline int rb_discarded_event(struct ring_buffer_event *event)
224 {
225         return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
226 }
227
228 static void rb_event_set_padding(struct ring_buffer_event *event)
229 {
230         event->type_len = RINGBUF_TYPE_PADDING;
231         event->time_delta = 0;
232 }
233
234 static unsigned
235 rb_event_data_length(struct ring_buffer_event *event)
236 {
237         unsigned length;
238
239         if (event->type_len)
240                 length = event->type_len * RB_ALIGNMENT;
241         else
242                 length = event->array[0];
243         return length + RB_EVNT_HDR_SIZE;
244 }
245
246 /* inline for ring buffer fast paths */
247 static unsigned
248 rb_event_length(struct ring_buffer_event *event)
249 {
250         switch (event->type_len) {
251         case RINGBUF_TYPE_PADDING:
252                 if (rb_null_event(event))
253                         /* undefined */
254                         return -1;
255                 return  event->array[0] + RB_EVNT_HDR_SIZE;
256
257         case RINGBUF_TYPE_TIME_EXTEND:
258                 return RB_LEN_TIME_EXTEND;
259
260         case RINGBUF_TYPE_TIME_STAMP:
261                 return RB_LEN_TIME_STAMP;
262
263         case RINGBUF_TYPE_DATA:
264                 return rb_event_data_length(event);
265         default:
266                 BUG();
267         }
268         /* not hit */
269         return 0;
270 }
271
272 /**
273  * ring_buffer_event_length - return the length of the event
274  * @event: the event to get the length of
275  */
276 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
277 {
278         unsigned length = rb_event_length(event);
279         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280                 return length;
281         length -= RB_EVNT_HDR_SIZE;
282         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283                 length -= sizeof(event->array[0]);
284         return length;
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
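/*
 * A small worked example of the length handling above (illustrative
 * only): a data event carrying 10 bytes of payload is rounded up and
 * stored with type_len = 3, i.e. 3 * RB_ALIGNMENT = 12 bytes.
 * rb_event_length() then reports 12 + RB_EVNT_HDR_SIZE = 16 bytes of
 * buffer space, while ring_buffer_event_length() strips the header
 * again and hands the caller the 12 usable payload bytes.
 */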
287
288 /* inline for ring buffer fast paths */
289 static void *
290 rb_event_data(struct ring_buffer_event *event)
291 {
292         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
293         /* If length is in len field, then array[0] has the data */
294         if (event->type_len)
295                 return (void *)&event->array[0];
296         /* Otherwise length is in array[0] and array[1] has the data */
297         return (void *)&event->array[1];
298 }
299
300 /**
301  * ring_buffer_event_data - return the data of the event
302  * @event: the event to get the data from
303  */
304 void *ring_buffer_event_data(struct ring_buffer_event *event)
305 {
306         return rb_event_data(event);
307 }
308 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
309
310 #define for_each_buffer_cpu(buffer, cpu)                \
311         for_each_cpu(cpu, buffer->cpumask)
312
313 #define TS_SHIFT        27
314 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
315 #define TS_DELTA_TEST   (~TS_MASK)
316
317 struct buffer_data_page {
318         u64              time_stamp;    /* page time stamp */
319         local_t          commit;        /* write committed index */
320         unsigned char    data[];        /* data of buffer page */
321 };
322
323 struct buffer_page {
324         struct list_head list;          /* list of buffer pages */
325         local_t          write;         /* index for next write */
326         unsigned         read;          /* index for next read */
327         local_t          entries;       /* entries on this page */
328         struct buffer_data_page *page;  /* Actual data page */
329 };
330
331 static void rb_init_page(struct buffer_data_page *bpage)
332 {
333         local_set(&bpage->commit, 0);
334 }
335
336 /**
337  * ring_buffer_page_len - the size of data on the page.
338  * @page: The page to read
339  *
340  * Returns the amount of data on the page, including buffer page header.
341  */
342 size_t ring_buffer_page_len(void *page)
343 {
344         return local_read(&((struct buffer_data_page *)page)->commit)
345                 + BUF_PAGE_HDR_SIZE;
346 }
347
348 /*
349  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
350  * this issue out.
351  */
352 static void free_buffer_page(struct buffer_page *bpage)
353 {
354         free_page((unsigned long)bpage->page);
355         kfree(bpage);
356 }
357
358 /*
359  * We need to fit the time_stamp delta into 27 bits.
360  */
361 static inline int test_time_stamp(u64 delta)
362 {
363         if (delta & TS_DELTA_TEST)
364                 return 1;
365         return 0;
366 }
367
368 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
369
370 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
371 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
372
373 /* Max number of timestamps that can fit on a page */
374 #define RB_TIMESTAMPS_PER_PAGE  (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
375
376 int ring_buffer_print_page_header(struct trace_seq *s)
377 {
378         struct buffer_data_page field;
379         int ret;
380
381         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
382                                "offset:0;\tsize:%u;\n",
383                                (unsigned int)sizeof(field.time_stamp));
384
385         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
386                                "offset:%u;\tsize:%u;\n",
387                                (unsigned int)offsetof(typeof(field), commit),
388                                (unsigned int)sizeof(field.commit));
389
390         ret = trace_seq_printf(s, "\tfield: char data;\t"
391                                "offset:%u;\tsize:%u;\n",
392                                (unsigned int)offsetof(typeof(field), data),
393                                (unsigned int)BUF_PAGE_SIZE);
394
395         return ret;
396 }
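/*
 * As an illustration, on a 64-bit build with 4K pages (an assumption
 * for this example, not something the code requires) the output of the
 * function above would look roughly like:
 *
 *	field: u64 timestamp;	offset:0;	size:8;
 *	field: local_t commit;	offset:8;	size:8;
 *	field: char data;	offset:16;	size:4080;
 */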
397
398 /*
399  * head_page == tail_page && head == tail then buffer is empty.
400  */
401 struct ring_buffer_per_cpu {
402         int                             cpu;
403         struct ring_buffer              *buffer;
404         spinlock_t                      reader_lock; /* serialize readers */
405         raw_spinlock_t                  lock;
406         struct lock_class_key           lock_key;
407         struct list_head                pages;
408         struct buffer_page              *head_page;     /* read from head */
409         struct buffer_page              *tail_page;     /* write to tail */
410         struct buffer_page              *commit_page;   /* committed pages */
411         struct buffer_page              *reader_page;
412         unsigned long                   nmi_dropped;
413         unsigned long                   commit_overrun;
414         unsigned long                   overrun;
415         unsigned long                   read;
416         local_t                         entries;
417         u64                             write_stamp;
418         u64                             read_stamp;
419         atomic_t                        record_disabled;
420 };
421
422 struct ring_buffer {
423         unsigned                        pages;
424         unsigned                        flags;
425         int                             cpus;
426         atomic_t                        record_disabled;
427         cpumask_var_t                   cpumask;
428
429         struct mutex                    mutex;
430
431         struct ring_buffer_per_cpu      **buffers;
432
433 #ifdef CONFIG_HOTPLUG_CPU
434         struct notifier_block           cpu_notify;
435 #endif
436         u64                             (*clock)(void);
437 };
438
439 struct ring_buffer_iter {
440         struct ring_buffer_per_cpu      *cpu_buffer;
441         unsigned long                   head;
442         struct buffer_page              *head_page;
443         u64                             read_stamp;
444 };
445
446 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
447 #define RB_WARN_ON(buffer, cond)                                \
448         ({                                                      \
449                 int _____ret = unlikely(cond);                  \
450                 if (_____ret) {                                 \
451                         atomic_inc(&buffer->record_disabled);   \
452                         WARN_ON(1);                             \
453                 }                                               \
454                 _____ret;                                       \
455         })
456
457 /* Up this if you want to test the TIME_EXTENTS and normalization */
458 #define DEBUG_SHIFT 0
459
460 static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
461 {
462         /* shift to debug/test normalization and TIME_EXTENTS */
463         return buffer->clock() << DEBUG_SHIFT;
464 }
465
466 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
467 {
468         u64 time;
469
470         preempt_disable_notrace();
471         time = rb_time_stamp(buffer, cpu);
472         preempt_enable_no_resched_notrace();
473
474         return time;
475 }
476 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
477
478 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
479                                       int cpu, u64 *ts)
480 {
481         /* Just stupid testing the normalize function and deltas */
482         *ts >>= DEBUG_SHIFT;
483 }
484 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
485
486 /**
487  * rb_check_pages - integrity check of buffer pages
488  * @cpu_buffer: CPU buffer with pages to test
489  *
490  * As a safety measure we check to make sure the data pages have not
491  * been corrupted.
492  */
493 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
494 {
495         struct list_head *head = &cpu_buffer->pages;
496         struct buffer_page *bpage, *tmp;
497
498         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
499                 return -1;
500         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
501                 return -1;
502
503         list_for_each_entry_safe(bpage, tmp, head, list) {
504                 if (RB_WARN_ON(cpu_buffer,
505                                bpage->list.next->prev != &bpage->list))
506                         return -1;
507                 if (RB_WARN_ON(cpu_buffer,
508                                bpage->list.prev->next != &bpage->list))
509                         return -1;
510         }
511
512         return 0;
513 }
514
515 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
516                              unsigned nr_pages)
517 {
518         struct list_head *head = &cpu_buffer->pages;
519         struct buffer_page *bpage, *tmp;
520         unsigned long addr;
521         LIST_HEAD(pages);
522         unsigned i;
523
524         for (i = 0; i < nr_pages; i++) {
525                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
526                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
527                 if (!bpage)
528                         goto free_pages;
529                 list_add(&bpage->list, &pages);
530
531                 addr = __get_free_page(GFP_KERNEL);
532                 if (!addr)
533                         goto free_pages;
534                 bpage->page = (void *)addr;
535                 rb_init_page(bpage->page);
536         }
537
538         list_splice(&pages, head);
539
540         rb_check_pages(cpu_buffer);
541
542         return 0;
543
544  free_pages:
545         list_for_each_entry_safe(bpage, tmp, &pages, list) {
546                 list_del_init(&bpage->list);
547                 free_buffer_page(bpage);
548         }
549         return -ENOMEM;
550 }
551
552 static struct ring_buffer_per_cpu *
553 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
554 {
555         struct ring_buffer_per_cpu *cpu_buffer;
556         struct buffer_page *bpage;
557         unsigned long addr;
558         int ret;
559
560         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
561                                   GFP_KERNEL, cpu_to_node(cpu));
562         if (!cpu_buffer)
563                 return NULL;
564
565         cpu_buffer->cpu = cpu;
566         cpu_buffer->buffer = buffer;
567         spin_lock_init(&cpu_buffer->reader_lock);
568         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
569         INIT_LIST_HEAD(&cpu_buffer->pages);
570
571         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
572                             GFP_KERNEL, cpu_to_node(cpu));
573         if (!bpage)
574                 goto fail_free_buffer;
575
576         cpu_buffer->reader_page = bpage;
577         addr = __get_free_page(GFP_KERNEL);
578         if (!addr)
579                 goto fail_free_reader;
580         bpage->page = (void *)addr;
581         rb_init_page(bpage->page);
582
583         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
584
585         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
586         if (ret < 0)
587                 goto fail_free_reader;
588
589         cpu_buffer->head_page
590                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
591         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
592
593         return cpu_buffer;
594
595  fail_free_reader:
596         free_buffer_page(cpu_buffer->reader_page);
597
598  fail_free_buffer:
599         kfree(cpu_buffer);
600         return NULL;
601 }
602
603 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
604 {
605         struct list_head *head = &cpu_buffer->pages;
606         struct buffer_page *bpage, *tmp;
607
608         free_buffer_page(cpu_buffer->reader_page);
609
610         list_for_each_entry_safe(bpage, tmp, head, list) {
611                 list_del_init(&bpage->list);
612                 free_buffer_page(bpage);
613         }
614         kfree(cpu_buffer);
615 }
616
617 /*
618  * Causes compile errors if the struct buffer_page gets bigger
619  * than the struct page.
620  */
621 extern int ring_buffer_page_too_big(void);
622
623 #ifdef CONFIG_HOTPLUG_CPU
624 static int rb_cpu_notify(struct notifier_block *self,
625                          unsigned long action, void *hcpu);
626 #endif
627
628 /**
629  * ring_buffer_alloc - allocate a new ring_buffer
630  * @size: the size in bytes per cpu that is needed.
631  * @flags: attributes to set for the ring buffer.
632  *
633  * Currently the only flag that is available is the RB_FL_OVERWRITE
634  * flag. This flag means that the buffer will overwrite old data
635  * when the buffer wraps. If this flag is not set, the buffer will
636  * drop data when the tail hits the head.
637  */
638 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
639 {
640         struct ring_buffer *buffer;
641         int bsize;
642         int cpu;
643
644         /* Paranoid! Optimizes out when all is well */
645         if (sizeof(struct buffer_page) > sizeof(struct page))
646                 ring_buffer_page_too_big();
647
648
649         /* keep it in its own cache line */
650         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
651                          GFP_KERNEL);
652         if (!buffer)
653                 return NULL;
654
655         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
656                 goto fail_free_buffer;
657
658         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
659         buffer->flags = flags;
660         buffer->clock = trace_clock_local;
661
662         /* need at least two pages */
663         if (buffer->pages == 1)
664                 buffer->pages++;
665
666         /*
667          * In the non-hotplug cpu case, if the ring buffer is allocated
668          * in an early initcall, it will not be notified of secondary cpus.
669          * In that case, we need to allocate for all possible cpus.
670          */
671 #ifdef CONFIG_HOTPLUG_CPU
672         get_online_cpus();
673         cpumask_copy(buffer->cpumask, cpu_online_mask);
674 #else
675         cpumask_copy(buffer->cpumask, cpu_possible_mask);
676 #endif
677         buffer->cpus = nr_cpu_ids;
678
679         bsize = sizeof(void *) * nr_cpu_ids;
680         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
681                                   GFP_KERNEL);
682         if (!buffer->buffers)
683                 goto fail_free_cpumask;
684
685         for_each_buffer_cpu(buffer, cpu) {
686                 buffer->buffers[cpu] =
687                         rb_allocate_cpu_buffer(buffer, cpu);
688                 if (!buffer->buffers[cpu])
689                         goto fail_free_buffers;
690         }
691
692 #ifdef CONFIG_HOTPLUG_CPU
693         buffer->cpu_notify.notifier_call = rb_cpu_notify;
694         buffer->cpu_notify.priority = 0;
695         register_cpu_notifier(&buffer->cpu_notify);
696 #endif
697
698         put_online_cpus();
699         mutex_init(&buffer->mutex);
700
701         return buffer;
702
703  fail_free_buffers:
704         for_each_buffer_cpu(buffer, cpu) {
705                 if (buffer->buffers[cpu])
706                         rb_free_cpu_buffer(buffer->buffers[cpu]);
707         }
708         kfree(buffer->buffers);
709
710  fail_free_cpumask:
711         free_cpumask_var(buffer->cpumask);
712         put_online_cpus();
713
714  fail_free_buffer:
715         kfree(buffer);
716         return NULL;
717 }
718 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
719
720 /**
721  * ring_buffer_free - free a ring buffer.
722  * @buffer: the buffer to free.
723  */
724 void
725 ring_buffer_free(struct ring_buffer *buffer)
726 {
727         int cpu;
728
729         get_online_cpus();
730
731 #ifdef CONFIG_HOTPLUG_CPU
732         unregister_cpu_notifier(&buffer->cpu_notify);
733 #endif
734
735         for_each_buffer_cpu(buffer, cpu)
736                 rb_free_cpu_buffer(buffer->buffers[cpu]);
737
738         put_online_cpus();
739
740         free_cpumask_var(buffer->cpumask);
741
742         kfree(buffer);
743 }
744 EXPORT_SYMBOL_GPL(ring_buffer_free);
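/*
 * A minimal usage sketch of the two calls above (not taken from
 * in-tree code; the one-megabyte size is made up for illustration):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */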
745
746 void ring_buffer_set_clock(struct ring_buffer *buffer,
747                            u64 (*clock)(void))
748 {
749         buffer->clock = clock;
750 }
751
752 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
753
754 static void
755 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
756 {
757         struct buffer_page *bpage;
758         struct list_head *p;
759         unsigned i;
760
761         atomic_inc(&cpu_buffer->record_disabled);
762         synchronize_sched();
763
764         for (i = 0; i < nr_pages; i++) {
765                 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
766                         return;
767                 p = cpu_buffer->pages.next;
768                 bpage = list_entry(p, struct buffer_page, list);
769                 list_del_init(&bpage->list);
770                 free_buffer_page(bpage);
771         }
772         if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
773                 return;
774
775         rb_reset_cpu(cpu_buffer);
776
777         rb_check_pages(cpu_buffer);
778
779         atomic_dec(&cpu_buffer->record_disabled);
780
781 }
782
783 static void
784 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
785                 struct list_head *pages, unsigned nr_pages)
786 {
787         struct buffer_page *bpage;
788         struct list_head *p;
789         unsigned i;
790
791         atomic_inc(&cpu_buffer->record_disabled);
792         synchronize_sched();
793
794         for (i = 0; i < nr_pages; i++) {
795                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
796                         return;
797                 p = pages->next;
798                 bpage = list_entry(p, struct buffer_page, list);
799                 list_del_init(&bpage->list);
800                 list_add_tail(&bpage->list, &cpu_buffer->pages);
801         }
802         rb_reset_cpu(cpu_buffer);
803
804         rb_check_pages(cpu_buffer);
805
806         atomic_dec(&cpu_buffer->record_disabled);
807 }
808
809 /**
810  * ring_buffer_resize - resize the ring buffer
811  * @buffer: the buffer to resize.
812  * @size: the new size.
813  *
814  * The tracer is responsible for making sure that the buffer is
815  * not being used while changing the size.
816  * Note: We may be able to change the above requirement by using
817  *  RCU synchronizations.
818  *
819  * Minimum size is 2 * BUF_PAGE_SIZE.
820  *
821  * Returns -1 on failure.
822  */
823 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
824 {
825         struct ring_buffer_per_cpu *cpu_buffer;
826         unsigned nr_pages, rm_pages, new_pages;
827         struct buffer_page *bpage, *tmp;
828         unsigned long buffer_size;
829         unsigned long addr;
830         LIST_HEAD(pages);
831         int i, cpu;
832
833         /*
834          * Always succeed at resizing a non-existent buffer:
835          */
836         if (!buffer)
837                 return size;
838
839         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
840         size *= BUF_PAGE_SIZE;
841         buffer_size = buffer->pages * BUF_PAGE_SIZE;
842
843         /* we need a minimum of two pages */
844         if (size < BUF_PAGE_SIZE * 2)
845                 size = BUF_PAGE_SIZE * 2;
846
847         if (size == buffer_size)
848                 return size;
849
850         mutex_lock(&buffer->mutex);
851         get_online_cpus();
852
853         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
854
855         if (size < buffer_size) {
856
857                 /* easy case, just free pages */
858                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
859                         goto out_fail;
860
861                 rm_pages = buffer->pages - nr_pages;
862
863                 for_each_buffer_cpu(buffer, cpu) {
864                         cpu_buffer = buffer->buffers[cpu];
865                         rb_remove_pages(cpu_buffer, rm_pages);
866                 }
867                 goto out;
868         }
869
870         /*
871          * This is a bit more difficult. We only want to add pages
872          * when we can allocate enough for all CPUs. We do this
873          * by allocating all the pages and storing them on a local
874          * linked list. If we succeed in our allocation, then we
875          * add these pages to the cpu_buffers. Otherwise we just free
876          * them all and return -ENOMEM;
877          */
878         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
879                 goto out_fail;
880
881         new_pages = nr_pages - buffer->pages;
882
883         for_each_buffer_cpu(buffer, cpu) {
884                 for (i = 0; i < new_pages; i++) {
885                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
886                                                   cache_line_size()),
887                                             GFP_KERNEL, cpu_to_node(cpu));
888                         if (!bpage)
889                                 goto free_pages;
890                         list_add(&bpage->list, &pages);
891                         addr = __get_free_page(GFP_KERNEL);
892                         if (!addr)
893                                 goto free_pages;
894                         bpage->page = (void *)addr;
895                         rb_init_page(bpage->page);
896                 }
897         }
898
899         for_each_buffer_cpu(buffer, cpu) {
900                 cpu_buffer = buffer->buffers[cpu];
901                 rb_insert_pages(cpu_buffer, &pages, new_pages);
902         }
903
904         if (RB_WARN_ON(buffer, !list_empty(&pages)))
905                 goto out_fail;
906
907  out:
908         buffer->pages = nr_pages;
909         put_online_cpus();
910         mutex_unlock(&buffer->mutex);
911
912         return size;
913
914  free_pages:
915         list_for_each_entry_safe(bpage, tmp, &pages, list) {
916                 list_del_init(&bpage->list);
917                 free_buffer_page(bpage);
918         }
919         put_online_cpus();
920         mutex_unlock(&buffer->mutex);
921         return -ENOMEM;
922
923         /*
924          * Something went totally wrong, and we are too paranoid
925          * to even clean up the mess.
926          */
927  out_fail:
928         put_online_cpus();
929         mutex_unlock(&buffer->mutex);
930         return -1;
931 }
932 EXPORT_SYMBOL_GPL(ring_buffer_resize);
933
934 static inline void *
935 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
936 {
937         return bpage->data + index;
938 }
939
940 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
941 {
942         return bpage->page->data + index;
943 }
944
945 static inline struct ring_buffer_event *
946 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
947 {
948         return __rb_page_index(cpu_buffer->reader_page,
949                                cpu_buffer->reader_page->read);
950 }
951
952 static inline struct ring_buffer_event *
953 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
954 {
955         return __rb_page_index(cpu_buffer->head_page,
956                                cpu_buffer->head_page->read);
957 }
958
959 static inline struct ring_buffer_event *
960 rb_iter_head_event(struct ring_buffer_iter *iter)
961 {
962         return __rb_page_index(iter->head_page, iter->head);
963 }
964
965 static inline unsigned rb_page_write(struct buffer_page *bpage)
966 {
967         return local_read(&bpage->write);
968 }
969
970 static inline unsigned rb_page_commit(struct buffer_page *bpage)
971 {
972         return local_read(&bpage->page->commit);
973 }
974
975 /* Size is determined by what has been committed */
976 static inline unsigned rb_page_size(struct buffer_page *bpage)
977 {
978         return rb_page_commit(bpage);
979 }
980
981 static inline unsigned
982 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
983 {
984         return rb_page_commit(cpu_buffer->commit_page);
985 }
986
987 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
988 {
989         return rb_page_commit(cpu_buffer->head_page);
990 }
991
992 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
993                                struct buffer_page **bpage)
994 {
995         struct list_head *p = (*bpage)->list.next;
996
997         if (p == &cpu_buffer->pages)
998                 p = p->next;
999
1000         *bpage = list_entry(p, struct buffer_page, list);
1001 }
1002
1003 static inline unsigned
1004 rb_event_index(struct ring_buffer_event *event)
1005 {
1006         unsigned long addr = (unsigned long)event;
1007
1008         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
1009 }
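/*
 * Since PAGE_SIZE - BUF_PAGE_SIZE equals BUF_PAGE_HDR_SIZE, the index
 * computed above is simply the event's offset within the data[] array
 * of its buffer_data_page.  For example, assuming a 64-bit build where
 * that header is 16 bytes, an event at page offset 0x50 gets
 * index 0x40.
 */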
1010
1011 static inline int
1012 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1013              struct ring_buffer_event *event)
1014 {
1015         unsigned long addr = (unsigned long)event;
1016         unsigned long index;
1017
1018         index = rb_event_index(event);
1019         addr &= PAGE_MASK;
1020
1021         return cpu_buffer->commit_page->page == (void *)addr &&
1022                 rb_commit_index(cpu_buffer) == index;
1023 }
1024
1025 static void
1026 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1027                     struct ring_buffer_event *event)
1028 {
1029         unsigned long addr = (unsigned long)event;
1030         unsigned long index;
1031
1032         index = rb_event_index(event);
1033         addr &= PAGE_MASK;
1034
1035         while (cpu_buffer->commit_page->page != (void *)addr) {
1036                 if (RB_WARN_ON(cpu_buffer,
1037                           cpu_buffer->commit_page == cpu_buffer->tail_page))
1038                         return;
1039                 cpu_buffer->commit_page->page->commit =
1040                         cpu_buffer->commit_page->write;
1041                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1042                 cpu_buffer->write_stamp =
1043                         cpu_buffer->commit_page->page->time_stamp;
1044         }
1045
1046         /* Now set the commit to the event's index */
1047         local_set(&cpu_buffer->commit_page->page->commit, index);
1048 }
1049
1050 static void
1051 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1052 {
1053         /*
1054          * We only race with interrupts and NMIs on this CPU.
1055          * If we own the commit event, then we can commit
1056          * all others that interrupted us, since the interruptions
1057          * are in stack format (they finish before they come
1058          * back to us). This allows us to do a simple loop to
1059          * assign the commit to the tail.
1060          */
1061  again:
1062         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1063                 cpu_buffer->commit_page->page->commit =
1064                         cpu_buffer->commit_page->write;
1065                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1066                 cpu_buffer->write_stamp =
1067                         cpu_buffer->commit_page->page->time_stamp;
1068                 /* add barrier to keep gcc from optimizing too much */
1069                 barrier();
1070         }
1071         while (rb_commit_index(cpu_buffer) !=
1072                rb_page_write(cpu_buffer->commit_page)) {
1073                 cpu_buffer->commit_page->page->commit =
1074                         cpu_buffer->commit_page->write;
1075                 barrier();
1076         }
1077
1078         /* again, keep gcc from optimizing */
1079         barrier();
1080
1081         /*
1082          * If an interrupt came in just after the first while loop
1083          * and pushed the tail page forward, we will be left with
1084          * a dangling commit that will never go forward.
1085          */
1086         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1087                 goto again;
1088 }
1089
1090 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1091 {
1092         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1093         cpu_buffer->reader_page->read = 0;
1094 }
1095
1096 static void rb_inc_iter(struct ring_buffer_iter *iter)
1097 {
1098         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1099
1100         /*
1101          * The iterator could be on the reader page (it starts there).
1102          * But the head could have moved, since the reader was
1103          * found. Check for this case and assign the iterator
1104          * to the head page instead of next.
1105          */
1106         if (iter->head_page == cpu_buffer->reader_page)
1107                 iter->head_page = cpu_buffer->head_page;
1108         else
1109                 rb_inc_page(cpu_buffer, &iter->head_page);
1110
1111         iter->read_stamp = iter->head_page->page->time_stamp;
1112         iter->head = 0;
1113 }
1114
1115 /**
1116  * rb_update_event - update event type and data
1117  * @event: the event to update
1118  * @type: the type of event
1119  * @length: the size of the event field in the ring buffer
1120  *
1121  * Update the type and data fields of the event. The length
1122  * is the actual size that is written to the ring buffer,
1123  * and with this, we can determine what to place into the
1124  * data field.
1125  */
1126 static void
1127 rb_update_event(struct ring_buffer_event *event,
1128                          unsigned type, unsigned length)
1129 {
1130         event->type_len = type;
1131
1132         switch (type) {
1133
1134         case RINGBUF_TYPE_PADDING:
1135         case RINGBUF_TYPE_TIME_EXTEND:
1136         case RINGBUF_TYPE_TIME_STAMP:
1137                 break;
1138
1139         case 0:
1140                 length -= RB_EVNT_HDR_SIZE;
1141                 if (length > RB_MAX_SMALL_DATA)
1142                         event->array[0] = length;
1143                 else
1144                         event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1145                 break;
1146         default:
1147                 BUG();
1148         }
1149 }
1150
1151 static unsigned rb_calculate_event_length(unsigned length)
1152 {
1153         struct ring_buffer_event event; /* Used only for sizeof array */
1154
1155         /* zero length can cause confusion */
1156         if (!length)
1157                 length = 1;
1158
1159         if (length > RB_MAX_SMALL_DATA)
1160                 length += sizeof(event.array[0]);
1161
1162         length += RB_EVNT_HDR_SIZE;
1163         length = ALIGN(length, RB_ALIGNMENT);
1164
1165         return length;
1166 }
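/*
 * For example (following the code above): a request for 5 bytes of
 * payload becomes 5 + RB_EVNT_HDR_SIZE = 9, which is aligned up to 12
 * bytes of reserved space; only requests larger than RB_MAX_SMALL_DATA
 * additionally reserve array[0] to hold the length.
 */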
1167
1168
1169 static struct ring_buffer_event *
1170 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1171              unsigned long length, unsigned long tail,
1172              struct buffer_page *commit_page,
1173              struct buffer_page *tail_page, u64 *ts)
1174 {
1175         struct buffer_page *next_page, *head_page, *reader_page;
1176         struct ring_buffer *buffer = cpu_buffer->buffer;
1177         struct ring_buffer_event *event;
1178         bool lock_taken = false;
1179         unsigned long flags;
1180
1181         next_page = tail_page;
1182
1183         local_irq_save(flags);
1184         /*
1185          * Since the write to the buffer is still not
1186          * fully lockless, we must be careful with NMIs.
1187          * The locks in the writers are taken when a write
1188          * crosses to a new page. The locks protect against
1189          * races with the readers (this will soon be fixed
1190          * with a lockless solution).
1191          *
1192          * Because we can not protect against NMIs, and we
1193          * want to keep traces reentrant, we need to manage
1194          * what happens when we are in an NMI.
1195          *
1196          * NMIs can happen after we take the lock.
1197          * If we are in an NMI, only take the lock
1198          * if it is not already taken. Otherwise
1199          * simply fail.
1200          */
1201         if (unlikely(in_nmi())) {
1202                 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1203                         cpu_buffer->nmi_dropped++;
1204                         goto out_reset;
1205                 }
1206         } else
1207                 __raw_spin_lock(&cpu_buffer->lock);
1208
1209         lock_taken = true;
1210
1211         rb_inc_page(cpu_buffer, &next_page);
1212
1213         head_page = cpu_buffer->head_page;
1214         reader_page = cpu_buffer->reader_page;
1215
1216         /* we grabbed the lock before incrementing */
1217         if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1218                 goto out_reset;
1219
1220         /*
1221          * If for some reason, we had an interrupt storm that made
1222          * it all the way around the buffer, bail, and warn
1223          * about it.
1224          */
1225         if (unlikely(next_page == commit_page)) {
1226                 cpu_buffer->commit_overrun++;
1227                 goto out_reset;
1228         }
1229
1230         if (next_page == head_page) {
1231                 if (!(buffer->flags & RB_FL_OVERWRITE))
1232                         goto out_reset;
1233
1234                 /* tail_page has not moved yet? */
1235                 if (tail_page == cpu_buffer->tail_page) {
1236                         /* count overflows */
1237                         cpu_buffer->overrun +=
1238                                 local_read(&head_page->entries);
1239
1240                         rb_inc_page(cpu_buffer, &head_page);
1241                         cpu_buffer->head_page = head_page;
1242                         cpu_buffer->head_page->read = 0;
1243                 }
1244         }
1245
1246         /*
1247          * If the tail page is still the same as what we think
1248          * it is, then it is up to us to update the tail
1249          * pointer.
1250          */
1251         if (tail_page == cpu_buffer->tail_page) {
1252                 local_set(&next_page->write, 0);
1253                 local_set(&next_page->entries, 0);
1254                 local_set(&next_page->page->commit, 0);
1255                 cpu_buffer->tail_page = next_page;
1256
1257                 /* reread the time stamp */
1258                 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1259                 cpu_buffer->tail_page->page->time_stamp = *ts;
1260         }
1261
1262         /*
1263          * The actual tail page has moved forward.
1264          */
1265         if (tail < BUF_PAGE_SIZE) {
1266                 /* Mark the rest of the page with padding */
1267                 event = __rb_page_index(tail_page, tail);
1268                 rb_event_set_padding(event);
1269         }
1270
1271         /* Set the write back to the previous setting */
1272         local_sub(length, &tail_page->write);
1273
1274         /*
1275          * If this was a commit entry that failed,
1276          * increment that too
1277          */
1278         if (tail_page == cpu_buffer->commit_page &&
1279             tail == rb_commit_index(cpu_buffer)) {
1280                 rb_set_commit_to_write(cpu_buffer);
1281         }
1282
1283         __raw_spin_unlock(&cpu_buffer->lock);
1284         local_irq_restore(flags);
1285
1286         /* fail and let the caller try again */
1287         return ERR_PTR(-EAGAIN);
1288
1289  out_reset:
1290         /* reset write */
1291         local_sub(length, &tail_page->write);
1292
1293         if (likely(lock_taken))
1294                 __raw_spin_unlock(&cpu_buffer->lock);
1295         local_irq_restore(flags);
1296         return NULL;
1297 }
1298
1299 static struct ring_buffer_event *
1300 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1301                   unsigned type, unsigned long length, u64 *ts)
1302 {
1303         struct buffer_page *tail_page, *commit_page;
1304         struct ring_buffer_event *event;
1305         unsigned long tail, write;
1306
1307         commit_page = cpu_buffer->commit_page;
1308         /* we just need to protect against interrupts */
1309         barrier();
1310         tail_page = cpu_buffer->tail_page;
1311         write = local_add_return(length, &tail_page->write);
1312         tail = write - length;
1313
1314         /* See if we shot past the end of this buffer page */
1315         if (write > BUF_PAGE_SIZE)
1316                 return rb_move_tail(cpu_buffer, length, tail,
1317                                     commit_page, tail_page, ts);
1318
1319         /* We reserved something on the buffer */
1320
1321         if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1322                 return NULL;
1323
1324         event = __rb_page_index(tail_page, tail);
1325         rb_update_event(event, type, length);
1326
1327         /* The passed in type is zero for DATA */
1328         if (likely(!type))
1329                 local_inc(&tail_page->entries);
1330
1331         /*
1332          * If this is a commit and the tail is zero, then update
1333          * this page's time stamp.
1334          */
1335         if (!tail && rb_is_commit(cpu_buffer, event))
1336                 cpu_buffer->commit_page->page->time_stamp = *ts;
1337
1338         return event;
1339 }
1340
1341 static inline int
1342 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1343                   struct ring_buffer_event *event)
1344 {
1345         unsigned long new_index, old_index;
1346         struct buffer_page *bpage;
1347         unsigned long index;
1348         unsigned long addr;
1349
1350         new_index = rb_event_index(event);
1351         old_index = new_index + rb_event_length(event);
1352         addr = (unsigned long)event;
1353         addr &= PAGE_MASK;
1354
1355         bpage = cpu_buffer->tail_page;
1356
1357         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1358                 /*
1359                  * This is on the tail page. It is possible that
1360                  * a write could come in and move the tail page
1361                  * and write to the next page. That is fine
1362                  * because we just shorten what is on this page.
1363                  */
1364                 index = local_cmpxchg(&bpage->write, old_index, new_index);
1365                 if (index == old_index)
1366                         return 1;
1367         }
1368
1369         /* could not discard */
1370         return 0;
1371 }
1372
1373 static int
1374 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1375                   u64 *ts, u64 *delta)
1376 {
1377         struct ring_buffer_event *event;
1378         static int once;
1379         int ret;
1380
1381         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1382                 printk(KERN_WARNING "Delta way too big! %llu"
1383                        " ts=%llu write stamp = %llu\n",
1384                        (unsigned long long)*delta,
1385                        (unsigned long long)*ts,
1386                        (unsigned long long)cpu_buffer->write_stamp);
1387                 WARN_ON(1);
1388         }
1389
1390         /*
1391          * The delta is too big; we need to add a
1392          * new timestamp.
1393          */
1394         event = __rb_reserve_next(cpu_buffer,
1395                                   RINGBUF_TYPE_TIME_EXTEND,
1396                                   RB_LEN_TIME_EXTEND,
1397                                   ts);
1398         if (!event)
1399                 return -EBUSY;
1400
1401         if (PTR_ERR(event) == -EAGAIN)
1402                 return -EAGAIN;
1403
1404         /* Only a committed time event can update the write stamp */
1405         if (rb_is_commit(cpu_buffer, event)) {
1406                 /*
1407                  * If this is the first on the page, then we need to
1408                  * update the page itself, and just put in a zero.
1409                  */
1410                 if (rb_event_index(event)) {
1411                         event->time_delta = *delta & TS_MASK;
1412                         event->array[0] = *delta >> TS_SHIFT;
1413                 } else {
1414                         cpu_buffer->commit_page->page->time_stamp = *ts;
1415                         /* try to discard, since we do not need this */
1416                         if (!rb_try_to_discard(cpu_buffer, event)) {
1417                                 /* nope, just zero it */
1418                                 event->time_delta = 0;
1419                                 event->array[0] = 0;
1420                         }
1421                 }
1422                 cpu_buffer->write_stamp = *ts;
1423                 /* let the caller know this was the commit */
1424                 ret = 1;
1425         } else {
1426                 /* Try to discard the event */
1427                 if (!rb_try_to_discard(cpu_buffer, event)) {
1428                         /* Darn, this is just wasted space */
1429                         event->time_delta = 0;
1430                         event->array[0] = 0;
1431                         ret = 0;
1432                 }
1433         }
1434
1435         *delta = 0;
1436
1437         return ret;
1438 }
1439
1440 static struct ring_buffer_event *
1441 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1442                       unsigned long length)
1443 {
1444         struct ring_buffer_event *event;
1445         u64 ts, delta = 0;
1446         int commit = 0;
1447         int nr_loops = 0;
1448
1449         length = rb_calculate_event_length(length);
1450  again:
1451         /*
1452          * We allow for interrupts to reenter here and do a trace.
1453          * If one does, it will cause this original code to loop
1454          * back here. Even with heavy interrupts happening, this
1455          * should only happen a few times in a row. If this happens
1456          * 1000 times in a row, there must be either an interrupt
1457          * storm or we have something buggy.
1458          * Bail!
1459          */
1460         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1461                 return NULL;
1462
1463         ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1464
1465         /*
1466          * Only the first commit can update the timestamp.
1467          * Yes there is a race here. If an interrupt comes in
1468          * just after the conditional and it traces too, then it
1469          * will also check the deltas. More than one timestamp may
1470          * also be made. But only the entry that did the actual
1471          * commit will be something other than zero.
1472          */
1473         if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1474                    rb_page_write(cpu_buffer->tail_page) ==
1475                    rb_commit_index(cpu_buffer))) {
1476                 u64 diff;
1477
1478                 diff = ts - cpu_buffer->write_stamp;
1479
1480                 /* make sure this diff is calculated here */
1481                 barrier();
1482
1483                 /* Did the write stamp get updated already? */
1484                 if (unlikely(ts < cpu_buffer->write_stamp))
1485                         goto get_event;
1486
1487                 delta = diff;
1488                 if (unlikely(test_time_stamp(delta))) {
1489
1490                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1491                         if (commit == -EBUSY)
1492                                 return NULL;
1493
1494                         if (commit == -EAGAIN)
1495                                 goto again;
1496
1497                         RB_WARN_ON(cpu_buffer, commit < 0);
1498                 }
1499         }
1500
1501  get_event:
1502         event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1503         if (unlikely(PTR_ERR(event) == -EAGAIN))
1504                 goto again;
1505
1506         if (!event) {
1507                 if (unlikely(commit))
1508                         /*
1509                          * Ouch! We needed a timestamp and it was committed. But
1510                          * we didn't get our event reserved.
1511                          */
1512                         rb_set_commit_to_write(cpu_buffer);
1513                 return NULL;
1514         }
1515
1516         /*
1517          * If the timestamp was committed, make the commit our entry
1518          * now so that we will update it when needed.
1519          */
1520         if (unlikely(commit))
1521                 rb_set_commit_event(cpu_buffer, event);
1522         else if (!rb_is_commit(cpu_buffer, event))
1523                 delta = 0;
1524
1525         event->time_delta = delta;
1526
1527         return event;
1528 }
1529
1530 #define TRACE_RECURSIVE_DEPTH 16
1531
1532 static int trace_recursive_lock(void)
1533 {
1534         current->trace_recursion++;
1535
1536         if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1537                 return 0;
1538
1539         /* Disable all tracing before we do anything else */
1540         tracing_off_permanent();
1541
1542         printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1543                     "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1544                     current->trace_recursion,
1545                     hardirq_count() >> HARDIRQ_SHIFT,
1546                     softirq_count() >> SOFTIRQ_SHIFT,
1547                     in_nmi());
1548
1549         WARN_ON_ONCE(1);
1550         return -1;
1551 }
1552
1553 static void trace_recursive_unlock(void)
1554 {
1555         WARN_ON_ONCE(!current->trace_recursion);
1556
1557         current->trace_recursion--;
1558 }
1559
1560 static DEFINE_PER_CPU(int, rb_need_resched);
1561
1562 /**
1563  * ring_buffer_lock_reserve - reserve a part of the buffer
1564  * @buffer: the ring buffer to reserve from
1565  * @length: the length of the data to reserve (excluding event header)
1566  *
1567  * Returns a reserved event on the ring buffer to copy data directly into.
1568  * The user of this interface will need to get the body to write into
1569  * and can use the ring_buffer_event_data() interface.
1570  *
1571  * The length is the length of the data needed, not the event length
1572  * which also includes the event header.
1573  *
1574  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1575  * If NULL is returned, then nothing has been allocated or locked.
1576  */
1577 struct ring_buffer_event *
1578 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1579 {
1580         struct ring_buffer_per_cpu *cpu_buffer;
1581         struct ring_buffer_event *event;
1582         int cpu, resched;
1583
1584         if (ring_buffer_flags != RB_BUFFERS_ON)
1585                 return NULL;
1586
1587         if (atomic_read(&buffer->record_disabled))
1588                 return NULL;
1589
1590         /* If we are tracing schedule, we don't want to recurse */
1591         resched = ftrace_preempt_disable();
1592
1593         if (trace_recursive_lock())
1594                 goto out_nocheck;
1595
1596         cpu = raw_smp_processor_id();
1597
1598         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1599                 goto out;
1600
1601         cpu_buffer = buffer->buffers[cpu];
1602
1603         if (atomic_read(&cpu_buffer->record_disabled))
1604                 goto out;
1605
1606         if (length > BUF_MAX_DATA_SIZE)
1607                 goto out;
1608
1609         event = rb_reserve_next_event(cpu_buffer, length);
1610         if (!event)
1611                 goto out;
1612
1613         /*
1614          * Need to store resched state on this cpu.
1615          * Only the first needs to.
1616          */
1617
1618         if (preempt_count() == 1)
1619                 per_cpu(rb_need_resched, cpu) = resched;
1620
1621         return event;
1622
1623  out:
1624         trace_recursive_unlock();
1625
1626  out_nocheck:
1627         ftrace_preempt_enable(resched);
1628         return NULL;
1629 }
1630 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1631
1632 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1633                       struct ring_buffer_event *event)
1634 {
1635         local_inc(&cpu_buffer->entries);
1636
1637         /* Only process further if we own the commit */
1638         if (!rb_is_commit(cpu_buffer, event))
1639                 return;
1640
1641         cpu_buffer->write_stamp += event->time_delta;
1642
1643         rb_set_commit_to_write(cpu_buffer);
1644 }
1645
1646 /**
1647  * ring_buffer_unlock_commit - commit a reserved event
1648  * @buffer: The buffer to commit to
1649  * @event: The event pointer to commit.
1650  *
1651  * This commits the data to the ring buffer, and releases any locks held.
1652  *
1653  * Must be paired with ring_buffer_lock_reserve.
1654  */
1655 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1656                               struct ring_buffer_event *event)
1657 {
1658         struct ring_buffer_per_cpu *cpu_buffer;
1659         int cpu = raw_smp_processor_id();
1660
1661         cpu_buffer = buffer->buffers[cpu];
1662
1663         rb_commit(cpu_buffer, event);
1664
1665         trace_recursive_unlock();
1666
1667         /*
1668          * Only the last preempt count needs to restore preemption.
1669          */
1670         if (preempt_count() == 1)
1671                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1672         else
1673                 preempt_enable_no_resched_notrace();
1674
1675         return 0;
1676 }
1677 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
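
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * a writer using the reserve/commit pair above. The record layout
 * ("struct example_entry") and the helper name are made up for the example;
 * only the ring buffer calls are the real interfaces.
 */
struct example_entry {
	unsigned long		ip;
	unsigned long		parent_ip;
};

static int example_reserve_commit(struct ring_buffer *buffer,
				  unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;

	/* length is the payload size only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* releases the per-cpu state taken by ring_buffer_lock_reserve() */
	return ring_buffer_unlock_commit(buffer, event);
}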
1678
1679 static inline void rb_event_discard(struct ring_buffer_event *event)
1680 {
1681         /* array[0] holds the actual length for the discarded event */
1682         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1683         event->type_len = RINGBUF_TYPE_PADDING;
1684         /* time delta must be non zero */
1685         if (!event->time_delta)
1686                 event->time_delta = 1;
1687 }
1688
1689 /**
1690  * ring_buffer_event_discard - discard any event in the ring buffer
1691  * @event: the event to discard
1692  *
1693  * Sometimes an event that is in the ring buffer needs to be ignored.
1694  * This function lets the user discard an event in the ring buffer
1695  * and then that event will not be read later.
1696  *
1697  * Note, it is up to the user to be careful with this, and protect
1698  * against races. If the user discards an event that has been consumed
1699  * it is possible that it could corrupt the ring buffer.
1700  */
1701 void ring_buffer_event_discard(struct ring_buffer_event *event)
1702 {
1703         rb_event_discard(event);
1704 }
1705 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1706
1707 /**
1708  * ring_buffer_discard_commit - discard an event that has not been committed
1709  * @buffer: the ring buffer
1710  * @event: non committed event to discard
1711  *
1712  * This is similar to ring_buffer_event_discard but must only be
1713  * performed on an event that has not been committed yet. The difference
1714  * is that this will also try to free the event from the ring buffer
1715  * if another event has not been added behind it.
1716  *
1717  * If another event has been added behind it, it will set the event
1718  * up as discarded, and perform the commit.
1719  *
1720  * If this function is called, do not call ring_buffer_unlock_commit on
1721  * the event.
1722  */
1723 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1724                                 struct ring_buffer_event *event)
1725 {
1726         struct ring_buffer_per_cpu *cpu_buffer;
1727         int cpu;
1728
1729         /* The event is discarded regardless */
1730         rb_event_discard(event);
1731
1732         /*
1733          * This must only be called if the event has not been
1734          * committed yet. Thus we can assume that preemption
1735          * is still disabled.
1736          */
1737         RB_WARN_ON(buffer, preemptible());
1738
1739         cpu = smp_processor_id();
1740         cpu_buffer = buffer->buffers[cpu];
1741
1742         if (!rb_try_to_discard(cpu_buffer, event))
1743                 goto out;
1744
1745         /*
1746          * The commit is still visible by the reader, so we
1747          * must increment entries.
1748          */
1749         local_inc(&cpu_buffer->entries);
1750  out:
1751         /*
1752          * If a write came in and pushed the tail page
1753          * we still need to update the commit pointer
1754          * if we were the commit.
1755          */
1756         if (rb_is_commit(cpu_buffer, event))
1757                 rb_set_commit_to_write(cpu_buffer);
1758
1759         trace_recursive_unlock();
1760
1761         /*
1762          * Only the last preempt count needs to restore preemption.
1763          */
1764         if (preempt_count() == 1)
1765                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1766         else
1767                 preempt_enable_no_resched_notrace();
1768
1769 }
1770 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
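
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * reserving an event and dropping it with ring_buffer_discard_commit()
 * instead of committing, e.g. because a filter rejected it. The "keep"
 * decision is passed in to keep the example self-contained.
 */
static int example_filtered_write(struct ring_buffer *buffer,
				  void *data, unsigned long len, bool keep)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;

	memcpy(ring_buffer_event_data(event), data, len);

	if (!keep) {
		/* frees the space if possible, otherwise marks it as padding */
		ring_buffer_discard_commit(buffer, event);
		return 0;
	}

	return ring_buffer_unlock_commit(buffer, event);
}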
1771
1772 /**
1773  * ring_buffer_write - write data to the buffer without reserving
1774  * @buffer: The ring buffer to write to.
1775  * @length: The length of the data being written (excluding the event header)
1776  * @data: The data to write to the buffer.
1777  *
1778  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1779  * one function. If you already have the data to write to the buffer, it
1780  * may be easier to simply call this function.
1781  *
1782  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1783  * and not the length of the event which would hold the header.
1784  */
1785 int ring_buffer_write(struct ring_buffer *buffer,
1786                         unsigned long length,
1787                         void *data)
1788 {
1789         struct ring_buffer_per_cpu *cpu_buffer;
1790         struct ring_buffer_event *event;
1791         void *body;
1792         int ret = -EBUSY;
1793         int cpu, resched;
1794
1795         if (ring_buffer_flags != RB_BUFFERS_ON)
1796                 return -EBUSY;
1797
1798         if (atomic_read(&buffer->record_disabled))
1799                 return -EBUSY;
1800
1801         resched = ftrace_preempt_disable();
1802
1803         cpu = raw_smp_processor_id();
1804
1805         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1806                 goto out;
1807
1808         cpu_buffer = buffer->buffers[cpu];
1809
1810         if (atomic_read(&cpu_buffer->record_disabled))
1811                 goto out;
1812
1813         if (length > BUF_MAX_DATA_SIZE)
1814                 goto out;
1815
1816         event = rb_reserve_next_event(cpu_buffer, length);
1817         if (!event)
1818                 goto out;
1819
1820         body = rb_event_data(event);
1821
1822         memcpy(body, data, length);
1823
1824         rb_commit(cpu_buffer, event);
1825
1826         ret = 0;
1827  out:
1828         ftrace_preempt_enable(resched);
1829
1830         return ret;
1831 }
1832 EXPORT_SYMBOL_GPL(ring_buffer_write);
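
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * writing a ready-made blob in one call instead of reserve + commit. The
 * string payload is only an example.
 */
static int example_write_string(struct ring_buffer *buffer, const char *msg)
{
	/* length is the data length; the event header is accounted for us */
	return ring_buffer_write(buffer, strlen(msg) + 1, (void *)msg);
}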
1833
1834 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1835 {
1836         struct buffer_page *reader = cpu_buffer->reader_page;
1837         struct buffer_page *head = cpu_buffer->head_page;
1838         struct buffer_page *commit = cpu_buffer->commit_page;
1839
1840         return reader->read == rb_page_commit(reader) &&
1841                 (commit == reader ||
1842                  (commit == head &&
1843                   head->read == rb_page_commit(commit)));
1844 }
1845
1846 /**
1847  * ring_buffer_record_disable - stop all writes into the buffer
1848  * @buffer: The ring buffer to stop writes to.
1849  *
1850  * This prevents all writes to the buffer. Any attempt to write
1851  * to the buffer after this will fail and return NULL.
1852  *
1853  * The caller should call synchronize_sched() after this.
1854  */
1855 void ring_buffer_record_disable(struct ring_buffer *buffer)
1856 {
1857         atomic_inc(&buffer->record_disabled);
1858 }
1859 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1860
1861 /**
1862  * ring_buffer_record_enable - enable writes to the buffer
1863  * @buffer: The ring buffer to enable writes
1864  *
1865  * Note, multiple disables will need the same number of enables
1866  * to truly enable the writing (much like preempt_disable).
1867  */
1868 void ring_buffer_record_enable(struct ring_buffer *buffer)
1869 {
1870         atomic_dec(&buffer->record_disabled);
1871 }
1872 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
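
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * quiescing writers around a buffer-wide operation, following the
 * synchronize_sched() advice in the comment above. The "work" callback is
 * whatever the caller needs to do while writes are stopped.
 */
static void example_with_writers_stopped(struct ring_buffer *buffer,
					 void (*work)(struct ring_buffer *))
{
	ring_buffer_record_disable(buffer);
	/* wait for writers already inside the buffer to finish */
	synchronize_sched();

	work(buffer);

	ring_buffer_record_enable(buffer);
}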
1873
1874 /**
1875  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1876  * @buffer: The ring buffer to stop writes to.
1877  * @cpu: The CPU buffer to stop
1878  *
1879  * This prevents all writes to the buffer. Any attempt to write
1880  * to the buffer after this will fail and return NULL.
1881  *
1882  * The caller should call synchronize_sched() after this.
1883  */
1884 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1885 {
1886         struct ring_buffer_per_cpu *cpu_buffer;
1887
1888         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1889                 return;
1890
1891         cpu_buffer = buffer->buffers[cpu];
1892         atomic_inc(&cpu_buffer->record_disabled);
1893 }
1894 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1895
1896 /**
1897  * ring_buffer_record_enable_cpu - enable writes to the buffer
1898  * @buffer: The ring buffer to enable writes
1899  * @cpu: The CPU to enable.
1900  *
1901  * Note, multiple disables will need the same number of enables
1902  * to truly enable the writing (much like preempt_disable).
1903  */
1904 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1905 {
1906         struct ring_buffer_per_cpu *cpu_buffer;
1907
1908         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1909                 return;
1910
1911         cpu_buffer = buffer->buffers[cpu];
1912         atomic_dec(&cpu_buffer->record_disabled);
1913 }
1914 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1915
1916 /**
1917  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1918  * @buffer: The ring buffer
1919  * @cpu: The per CPU buffer to get the entries from.
1920  */
1921 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1922 {
1923         struct ring_buffer_per_cpu *cpu_buffer;
1924         unsigned long ret;
1925
1926         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1927                 return 0;
1928
1929         cpu_buffer = buffer->buffers[cpu];
1930         ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1931                 - cpu_buffer->read;
1932
1933         return ret;
1934 }
1935 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1936
1937 /**
1938  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1939  * @buffer: The ring buffer
1940  * @cpu: The per CPU buffer to get the number of overruns from
1941  */
1942 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1943 {
1944         struct ring_buffer_per_cpu *cpu_buffer;
1945         unsigned long ret;
1946
1947         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1948                 return 0;
1949
1950         cpu_buffer = buffer->buffers[cpu];
1951         ret = cpu_buffer->overrun;
1952
1953         return ret;
1954 }
1955 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1956
1957 /**
1958  * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
1959  * @buffer: The ring buffer
1960  * @cpu: The per CPU buffer to get the number of overruns from
1961  */
1962 unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1963 {
1964         struct ring_buffer_per_cpu *cpu_buffer;
1965         unsigned long ret;
1966
1967         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1968                 return 0;
1969
1970         cpu_buffer = buffer->buffers[cpu];
1971         ret = cpu_buffer->nmi_dropped;
1972
1973         return ret;
1974 }
1975 EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
1976
1977 /**
1978  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
1979  * @buffer: The ring buffer
1980  * @cpu: The per CPU buffer to get the number of overruns from
1981  */
1982 unsigned long
1983 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
1984 {
1985         struct ring_buffer_per_cpu *cpu_buffer;
1986         unsigned long ret;
1987
1988         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1989                 return 0;
1990
1991         cpu_buffer = buffer->buffers[cpu];
1992         ret = cpu_buffer->commit_overrun;
1993
1994         return ret;
1995 }
1996 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
1997
1998 /**
1999  * ring_buffer_entries - get the number of entries in a buffer
2000  * @buffer: The ring buffer
2001  *
2002  * Returns the total number of entries in the ring buffer
2003  * (all CPU entries)
2004  */
2005 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2006 {
2007         struct ring_buffer_per_cpu *cpu_buffer;
2008         unsigned long entries = 0;
2009         int cpu;
2010
2011         /* if you care about this being correct, lock the buffer */
2012         for_each_buffer_cpu(buffer, cpu) {
2013                 cpu_buffer = buffer->buffers[cpu];
2014                 entries += (local_read(&cpu_buffer->entries) -
2015                             cpu_buffer->overrun) - cpu_buffer->read;
2016         }
2017
2018         return entries;
2019 }
2020 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2021
2022 /**
2023  * ring_buffer_overruns - get the number of overruns in the buffer
2024  * @buffer: The ring buffer
2025  *
2026  * Returns the total number of overruns in the ring buffer
2027  * (all CPU entries)
2028  */
2029 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2030 {
2031         struct ring_buffer_per_cpu *cpu_buffer;
2032         unsigned long overruns = 0;
2033         int cpu;
2034
2035         /* if you care about this being correct, lock the buffer */
2036         for_each_buffer_cpu(buffer, cpu) {
2037                 cpu_buffer = buffer->buffers[cpu];
2038                 overruns += cpu_buffer->overrun;
2039         }
2040
2041         return overruns;
2042 }
2043 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
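
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * dumping the per-cpu counters exposed above. As the comments note, the
 * numbers are racy unless the buffer is locked; this is only a debug aid.
 */
static void example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: entries=%lu overruns=%lu\n", cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu));
}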
2044
2045 static void rb_iter_reset(struct ring_buffer_iter *iter)
2046 {
2047         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2048
2049         /* Iterator usage is expected to have record disabled */
2050         if (list_empty(&cpu_buffer->reader_page->list)) {
2051                 iter->head_page = cpu_buffer->head_page;
2052                 iter->head = cpu_buffer->head_page->read;
2053         } else {
2054                 iter->head_page = cpu_buffer->reader_page;
2055                 iter->head = cpu_buffer->reader_page->read;
2056         }
2057         if (iter->head)
2058                 iter->read_stamp = cpu_buffer->read_stamp;
2059         else
2060                 iter->read_stamp = iter->head_page->page->time_stamp;
2061 }
2062
2063 /**
2064  * ring_buffer_iter_reset - reset an iterator
2065  * @iter: The iterator to reset
2066  *
2067  * Resets the iterator, so that it will start from the beginning
2068  * again.
2069  */
2070 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2071 {
2072         struct ring_buffer_per_cpu *cpu_buffer;
2073         unsigned long flags;
2074
2075         if (!iter)
2076                 return;
2077
2078         cpu_buffer = iter->cpu_buffer;
2079
2080         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2081         rb_iter_reset(iter);
2082         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2083 }
2084 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2085
2086 /**
2087  * ring_buffer_iter_empty - check if an iterator has no more to read
2088  * @iter: The iterator to check
2089  */
2090 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2091 {
2092         struct ring_buffer_per_cpu *cpu_buffer;
2093
2094         cpu_buffer = iter->cpu_buffer;
2095
2096         return iter->head_page == cpu_buffer->commit_page &&
2097                 iter->head == rb_commit_index(cpu_buffer);
2098 }
2099 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2100
2101 static void
2102 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2103                      struct ring_buffer_event *event)
2104 {
2105         u64 delta;
2106
2107         switch (event->type_len) {
2108         case RINGBUF_TYPE_PADDING:
2109                 return;
2110
2111         case RINGBUF_TYPE_TIME_EXTEND:
2112                 delta = event->array[0];
2113                 delta <<= TS_SHIFT;
2114                 delta += event->time_delta;
2115                 cpu_buffer->read_stamp += delta;
2116                 return;
2117
2118         case RINGBUF_TYPE_TIME_STAMP:
2119                 /* FIXME: not implemented */
2120                 return;
2121
2122         case RINGBUF_TYPE_DATA:
2123                 cpu_buffer->read_stamp += event->time_delta;
2124                 return;
2125
2126         default:
2127                 BUG();
2128         }
2129         return;
2130 }
2131
2132 static void
2133 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2134                           struct ring_buffer_event *event)
2135 {
2136         u64 delta;
2137
2138         switch (event->type_len) {
2139         case RINGBUF_TYPE_PADDING:
2140                 return;
2141
2142         case RINGBUF_TYPE_TIME_EXTEND:
2143                 delta = event->array[0];
2144                 delta <<= TS_SHIFT;
2145                 delta += event->time_delta;
2146                 iter->read_stamp += delta;
2147                 return;
2148
2149         case RINGBUF_TYPE_TIME_STAMP:
2150                 /* FIXME: not implemented */
2151                 return;
2152
2153         case RINGBUF_TYPE_DATA:
2154                 iter->read_stamp += event->time_delta;
2155                 return;
2156
2157         default:
2158                 BUG();
2159         }
2160         return;
2161 }
2162
2163 static struct buffer_page *
2164 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2165 {
2166         struct buffer_page *reader = NULL;
2167         unsigned long flags;
2168         int nr_loops = 0;
2169
2170         local_irq_save(flags);
2171         __raw_spin_lock(&cpu_buffer->lock);
2172
2173  again:
2174         /*
2175          * This should normally only loop twice. But because the
2176          * start of the reader inserts an empty page, it causes
2177          * a case where we will loop three times. There should be no
2178          * reason to loop four times (that I know of).
2179          */
2180         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2181                 reader = NULL;
2182                 goto out;
2183         }
2184
2185         reader = cpu_buffer->reader_page;
2186
2187         /* If there's more to read, return this page */
2188         if (cpu_buffer->reader_page->read < rb_page_size(reader))
2189                 goto out;
2190
2191         /* Never should we have an index greater than the size */
2192         if (RB_WARN_ON(cpu_buffer,
2193                        cpu_buffer->reader_page->read > rb_page_size(reader)))
2194                 goto out;
2195
2196         /* check if we caught up to the tail */
2197         reader = NULL;
2198         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2199                 goto out;
2200
2201         /*
2202          * Splice the empty reader page into the list around the head.
2203          * Reset the reader page to size zero.
2204          */
2205
2206         reader = cpu_buffer->head_page;
2207         cpu_buffer->reader_page->list.next = reader->list.next;
2208         cpu_buffer->reader_page->list.prev = reader->list.prev;
2209
2210         local_set(&cpu_buffer->reader_page->write, 0);
2211         local_set(&cpu_buffer->reader_page->entries, 0);
2212         local_set(&cpu_buffer->reader_page->page->commit, 0);
2213
2214         /* Make the reader page now replace the head */
2215         reader->list.prev->next = &cpu_buffer->reader_page->list;
2216         reader->list.next->prev = &cpu_buffer->reader_page->list;
2217
2218         /*
2219          * If the tail is on the reader, then we must set the head
2220          * to the inserted page, otherwise we set it one before.
2221          */
2222         cpu_buffer->head_page = cpu_buffer->reader_page;
2223
2224         if (cpu_buffer->commit_page != reader)
2225                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2226
2227         /* Finally update the reader page to the new head */
2228         cpu_buffer->reader_page = reader;
2229         rb_reset_reader_page(cpu_buffer);
2230
2231         goto again;
2232
2233  out:
2234         __raw_spin_unlock(&cpu_buffer->lock);
2235         local_irq_restore(flags);
2236
2237         return reader;
2238 }
2239
2240 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2241 {
2242         struct ring_buffer_event *event;
2243         struct buffer_page *reader;
2244         unsigned length;
2245
2246         reader = rb_get_reader_page(cpu_buffer);
2247
2248         /* This function should not be called when buffer is empty */
2249         if (RB_WARN_ON(cpu_buffer, !reader))
2250                 return;
2251
2252         event = rb_reader_event(cpu_buffer);
2253
2254         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2255                         || rb_discarded_event(event))
2256                 cpu_buffer->read++;
2257
2258         rb_update_read_stamp(cpu_buffer, event);
2259
2260         length = rb_event_length(event);
2261         cpu_buffer->reader_page->read += length;
2262 }
2263
2264 static void rb_advance_iter(struct ring_buffer_iter *iter)
2265 {
2266         struct ring_buffer *buffer;
2267         struct ring_buffer_per_cpu *cpu_buffer;
2268         struct ring_buffer_event *event;
2269         unsigned length;
2270
2271         cpu_buffer = iter->cpu_buffer;
2272         buffer = cpu_buffer->buffer;
2273
2274         /*
2275          * Check if we are at the end of the buffer.
2276          */
2277         if (iter->head >= rb_page_size(iter->head_page)) {
2278                 /* discarded commits can make the page empty */
2279                 if (iter->head_page == cpu_buffer->commit_page)
2280                         return;
2281                 rb_inc_iter(iter);
2282                 return;
2283         }
2284
2285         event = rb_iter_head_event(iter);
2286
2287         length = rb_event_length(event);
2288
2289         /*
2290          * This should not be called to advance the header if we are
2291          * at the tail of the buffer.
2292          */
2293         if (RB_WARN_ON(cpu_buffer,
2294                        (iter->head_page == cpu_buffer->commit_page) &&
2295                        (iter->head + length > rb_commit_index(cpu_buffer))))
2296                 return;
2297
2298         rb_update_iter_read_stamp(iter, event);
2299
2300         iter->head += length;
2301
2302         /* check for end of page padding */
2303         if ((iter->head >= rb_page_size(iter->head_page)) &&
2304             (iter->head_page != cpu_buffer->commit_page))
2305                 rb_advance_iter(iter);
2306 }
2307
2308 static struct ring_buffer_event *
2309 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2310 {
2311         struct ring_buffer_per_cpu *cpu_buffer;
2312         struct ring_buffer_event *event;
2313         struct buffer_page *reader;
2314         int nr_loops = 0;
2315
2316         cpu_buffer = buffer->buffers[cpu];
2317
2318  again:
2319         /*
2320          * We repeat when a timestamp is encountered. It is possible
2321          * to get multiple timestamps from an interrupt entering just
2322          * as one timestamp is about to be written, or from discarded
2323          * commits. The most that we can have is the number on a single page.
2324          */
2325         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2326                 return NULL;
2327
2328         reader = rb_get_reader_page(cpu_buffer);
2329         if (!reader)
2330                 return NULL;
2331
2332         event = rb_reader_event(cpu_buffer);
2333
2334         switch (event->type_len) {
2335         case RINGBUF_TYPE_PADDING:
2336                 if (rb_null_event(event))
2337                         RB_WARN_ON(cpu_buffer, 1);
2338                 /*
2339                  * Because the writer could be discarding every
2340                  * event it creates (which would probably be bad)
2341                  * if we were to go back to "again" then we may never
2342                  * catch up, and will trigger the warn on, or lock
2343                  * the box. Return the padding, and we will release
2344                  * the current locks, and try again.
2345                  */
2346                 rb_advance_reader(cpu_buffer);
2347                 return event;
2348
2349         case RINGBUF_TYPE_TIME_EXTEND:
2350                 /* Internal data, OK to advance */
2351                 rb_advance_reader(cpu_buffer);
2352                 goto again;
2353
2354         case RINGBUF_TYPE_TIME_STAMP:
2355                 /* FIXME: not implemented */
2356                 rb_advance_reader(cpu_buffer);
2357                 goto again;
2358
2359         case RINGBUF_TYPE_DATA:
2360                 if (ts) {
2361                         *ts = cpu_buffer->read_stamp + event->time_delta;
2362                         ring_buffer_normalize_time_stamp(buffer,
2363                                                          cpu_buffer->cpu, ts);
2364                 }
2365                 return event;
2366
2367         default:
2368                 BUG();
2369         }
2370
2371         return NULL;
2372 }
2373 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2374
2375 static struct ring_buffer_event *
2376 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2377 {
2378         struct ring_buffer *buffer;
2379         struct ring_buffer_per_cpu *cpu_buffer;
2380         struct ring_buffer_event *event;
2381         int nr_loops = 0;
2382
2383         if (ring_buffer_iter_empty(iter))
2384                 return NULL;
2385
2386         cpu_buffer = iter->cpu_buffer;
2387         buffer = cpu_buffer->buffer;
2388
2389  again:
2390         /*
2391          * We repeat when a timestamp is encountered.
2392          * We can get multiple timestamps by nested interrupts or also
2393          * if filtering is on (discarding commits). Since discarding
2394          * commits can be frequent we can get a lot of timestamps.
2395          * But we limit them by not adding timestamps if they begin
2396          * at the start of a page.
2397          */
2398         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2399                 return NULL;
2400
2401         if (rb_per_cpu_empty(cpu_buffer))
2402                 return NULL;
2403
2404         event = rb_iter_head_event(iter);
2405
2406         switch (event->type_len) {
2407         case RINGBUF_TYPE_PADDING:
2408                 if (rb_null_event(event)) {
2409                         rb_inc_iter(iter);
2410                         goto again;
2411                 }
2412                 rb_advance_iter(iter);
2413                 return event;
2414
2415         case RINGBUF_TYPE_TIME_EXTEND:
2416                 /* Internal data, OK to advance */
2417                 rb_advance_iter(iter);
2418                 goto again;
2419
2420         case RINGBUF_TYPE_TIME_STAMP:
2421                 /* FIXME: not implemented */
2422                 rb_advance_iter(iter);
2423                 goto again;
2424
2425         case RINGBUF_TYPE_DATA:
2426                 if (ts) {
2427                         *ts = iter->read_stamp + event->time_delta;
2428                         ring_buffer_normalize_time_stamp(buffer,
2429                                                          cpu_buffer->cpu, ts);
2430                 }
2431                 return event;
2432
2433         default:
2434                 BUG();
2435         }
2436
2437         return NULL;
2438 }
2439 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2440
2441 /**
2442  * ring_buffer_peek - peek at the next event to be read
2443  * @buffer: The ring buffer to read
2444  * @cpu: The cpu to peek at
2445  * @ts: The timestamp counter of this event.
2446  *
2447  * This will return the event that will be read next, but does
2448  * not consume the data.
2449  */
2450 struct ring_buffer_event *
2451 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2452 {
2453         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2454         struct ring_buffer_event *event;
2455         unsigned long flags;
2456
2457         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2458                 return NULL;
2459
2460  again:
2461         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2462         event = rb_buffer_peek(buffer, cpu, ts);
2463         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2464
2465         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2466                 cpu_relax();
2467                 goto again;
2468         }
2469
2470         return event;
2471 }
2472
2473 /**
2474  * ring_buffer_iter_peek - peek at the next event to be read
2475  * @iter: The ring buffer iterator
2476  * @ts: The timestamp counter of this event.
2477  *
2478  * This will return the event that will be read next, but does
2479  * not increment the iterator.
2480  */
2481 struct ring_buffer_event *
2482 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2483 {
2484         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2485         struct ring_buffer_event *event;
2486         unsigned long flags;
2487
2488  again:
2489         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2490         event = rb_iter_peek(iter, ts);
2491         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2492
2493         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2494                 cpu_relax();
2495                 goto again;
2496         }
2497
2498         return event;
2499 }
2500
2501 /**
2502  * ring_buffer_consume - return an event and consume it
2503  * @buffer: The ring buffer to get the next event from
2504  *
2505  * Returns the next event in the ring buffer, and that event is consumed.
2506  * Meaning that sequential reads will keep returning a different event,
2507  * and eventually empty the ring buffer if the producer is slower.
2508  */
2509 struct ring_buffer_event *
2510 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2511 {
2512         struct ring_buffer_per_cpu *cpu_buffer;
2513         struct ring_buffer_event *event = NULL;
2514         unsigned long flags;
2515
2516  again:
2517         /* might be called in atomic */
2518         preempt_disable();
2519
2520         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2521                 goto out;
2522
2523         cpu_buffer = buffer->buffers[cpu];
2524         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2525
2526         event = rb_buffer_peek(buffer, cpu, ts);
2527         if (!event)
2528                 goto out_unlock;
2529
2530         rb_advance_reader(cpu_buffer);
2531
2532  out_unlock:
2533         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2534
2535  out:
2536         preempt_enable();
2537
2538         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2539                 cpu_relax();
2540                 goto again;
2541         }
2542
2543         return event;
2544 }
2545 EXPORT_SYMBOL_GPL(ring_buffer_consume);
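
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * draining one CPU's buffer with the consuming read interface. What is done
 * with each payload is left to the caller and only hinted at in a comment.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);
		unsigned int len = ring_buffer_event_length(event);

		/* handle the payload: data/len, stamped with ts */
		(void)data;
		(void)len;
	}
}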
2546
2547 /**
2548  * ring_buffer_read_start - start a non consuming read of the buffer
2549  * @buffer: The ring buffer to read from
2550  * @cpu: The cpu buffer to iterate over
2551  *
2552  * This starts up an iteration through the buffer. It also disables
2553  * the recording to the buffer until the reading is finished.
2554  * This prevents the reading from being corrupted. This is not
2555  * a consuming read, so a producer is not expected.
2556  *
2557  * Must be paired with ring_buffer_read_finish.
2558  */
2559 struct ring_buffer_iter *
2560 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2561 {
2562         struct ring_buffer_per_cpu *cpu_buffer;
2563         struct ring_buffer_iter *iter;
2564         unsigned long flags;
2565
2566         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2567                 return NULL;
2568
2569         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2570         if (!iter)
2571                 return NULL;
2572
2573         cpu_buffer = buffer->buffers[cpu];
2574
2575         iter->cpu_buffer = cpu_buffer;
2576
2577         atomic_inc(&cpu_buffer->record_disabled);
2578         synchronize_sched();
2579
2580         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2581         __raw_spin_lock(&cpu_buffer->lock);
2582         rb_iter_reset(iter);
2583         __raw_spin_unlock(&cpu_buffer->lock);
2584         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2585
2586         return iter;
2587 }
2588 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2589
2590 /**
2591  * ring_buffer_read_finish - finish reading the iterator of the buffer
2592  * @iter: The iterator retrieved by ring_buffer_read_start
2593  *
2594  * This re-enables the recording to the buffer, and frees the
2595  * iterator.
2596  */
2597 void
2598 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2599 {
2600         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2601
2602         atomic_dec(&cpu_buffer->record_disabled);
2603         kfree(iter);
2604 }
2605 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2606
2607 /**
2608  * ring_buffer_read - read the next item in the ring buffer by the iterator
2609  * @iter: The ring buffer iterator
2610  * @ts: The time stamp of the event read.
2611  *
2612  * This reads the next event in the ring buffer and increments the iterator.
2613  */
2614 struct ring_buffer_event *
2615 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2616 {
2617         struct ring_buffer_event *event;
2618         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2619         unsigned long flags;
2620
2621  again:
2622         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2623         event = rb_iter_peek(iter, ts);
2624         if (!event)
2625                 goto out;
2626
2627         rb_advance_iter(iter);
2628  out:
2629         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2630
2631         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2632                 cpu_relax();
2633                 goto again;
2634         }
2635
2636         return event;
2637 }
2638 EXPORT_SYMBOL_GPL(ring_buffer_read);
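
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * a non-consuming walk over one CPU buffer with the iterator interface.
 * ring_buffer_read_start() disables recording on that CPU buffer for the
 * duration of the walk.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts))) {
		/* inspect the event; it stays in the buffer */
	}

	ring_buffer_read_finish(iter);
}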
2639
2640 /**
2641  * ring_buffer_size - return the size of the ring buffer (in bytes)
2642  * @buffer: The ring buffer.
2643  */
2644 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2645 {
2646         return BUF_PAGE_SIZE * buffer->pages;
2647 }
2648 EXPORT_SYMBOL_GPL(ring_buffer_size);
2649
2650 static void
2651 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2652 {
2653         cpu_buffer->head_page
2654                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2655         local_set(&cpu_buffer->head_page->write, 0);
2656         local_set(&cpu_buffer->head_page->entries, 0);
2657         local_set(&cpu_buffer->head_page->page->commit, 0);
2658
2659         cpu_buffer->head_page->read = 0;
2660
2661         cpu_buffer->tail_page = cpu_buffer->head_page;
2662         cpu_buffer->commit_page = cpu_buffer->head_page;
2663
2664         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2665         local_set(&cpu_buffer->reader_page->write, 0);
2666         local_set(&cpu_buffer->reader_page->entries, 0);
2667         local_set(&cpu_buffer->reader_page->page->commit, 0);
2668         cpu_buffer->reader_page->read = 0;
2669
2670         cpu_buffer->nmi_dropped = 0;
2671         cpu_buffer->commit_overrun = 0;
2672         cpu_buffer->overrun = 0;
2673         cpu_buffer->read = 0;
2674         local_set(&cpu_buffer->entries, 0);
2675
2676         cpu_buffer->write_stamp = 0;
2677         cpu_buffer->read_stamp = 0;
2678 }
2679
2680 /**
2681  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2682  * @buffer: The ring buffer to reset a per cpu buffer of
2683  * @cpu: The CPU buffer to be reset
2684  */
2685 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2686 {
2687         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2688         unsigned long flags;
2689
2690         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2691                 return;
2692
2693         atomic_inc(&cpu_buffer->record_disabled);
2694
2695         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2696
2697         __raw_spin_lock(&cpu_buffer->lock);
2698
2699         rb_reset_cpu(cpu_buffer);
2700
2701         __raw_spin_unlock(&cpu_buffer->lock);
2702
2703         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2704
2705         atomic_dec(&cpu_buffer->record_disabled);
2706 }
2707 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2708
2709 /**
2710  * ring_buffer_reset - reset a ring buffer
2711  * @buffer: The ring buffer to reset all cpu buffers
2712  */
2713 void ring_buffer_reset(struct ring_buffer *buffer)
2714 {
2715         int cpu;
2716
2717         for_each_buffer_cpu(buffer, cpu)
2718                 ring_buffer_reset_cpu(buffer, cpu);
2719 }
2720 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2721
2722 /**
2723  * ring_buffer_empty - is the ring buffer empty?
2724  * @buffer: The ring buffer to test
2725  */
2726 int ring_buffer_empty(struct ring_buffer *buffer)
2727 {
2728         struct ring_buffer_per_cpu *cpu_buffer;
2729         int cpu;
2730
2731         /* yes this is racy, but if you don't like the race, lock the buffer */
2732         for_each_buffer_cpu(buffer, cpu) {
2733                 cpu_buffer = buffer->buffers[cpu];
2734                 if (!rb_per_cpu_empty(cpu_buffer))
2735                         return 0;
2736         }
2737
2738         return 1;
2739 }
2740 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2741
2742 /**
2743  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2744  * @buffer: The ring buffer
2745  * @cpu: The CPU buffer to test
2746  */
2747 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2748 {
2749         struct ring_buffer_per_cpu *cpu_buffer;
2750         int ret;
2751
2752         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2753                 return 1;
2754
2755         cpu_buffer = buffer->buffers[cpu];
2756         ret = rb_per_cpu_empty(cpu_buffer);
2757
2758
2759         return ret;
2760 }
2761 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2762
2763 /**
2764  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2765  * @buffer_a: One buffer to swap with
2766  * @buffer_b: The other buffer to swap with
2767  *
2768  * This function is useful for tracers that want to take a "snapshot"
2769  * of a CPU buffer and have another backup buffer lying around.
2770  * It is expected that the tracer handles the cpu buffer not being
2771  * used at the moment.
2772  */
2773 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2774                          struct ring_buffer *buffer_b, int cpu)
2775 {
2776         struct ring_buffer_per_cpu *cpu_buffer_a;
2777         struct ring_buffer_per_cpu *cpu_buffer_b;
2778         int ret = -EINVAL;
2779
2780         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2781             !cpumask_test_cpu(cpu, buffer_b->cpumask))
2782                 goto out;
2783
2784         /* At least make sure the two buffers are somewhat the same */
2785         if (buffer_a->pages != buffer_b->pages)
2786                 goto out;
2787
2788         ret = -EAGAIN;
2789
2790         if (ring_buffer_flags != RB_BUFFERS_ON)
2791                 goto out;
2792
2793         if (atomic_read(&buffer_a->record_disabled))
2794                 goto out;
2795
2796         if (atomic_read(&buffer_b->record_disabled))
2797                 goto out;
2798
2799         cpu_buffer_a = buffer_a->buffers[cpu];
2800         cpu_buffer_b = buffer_b->buffers[cpu];
2801
2802         if (atomic_read(&cpu_buffer_a->record_disabled))
2803                 goto out;
2804
2805         if (atomic_read(&cpu_buffer_b->record_disabled))
2806                 goto out;
2807
2808         /*
2809          * We can't do a synchronize_sched here because this
2810          * function can be called in atomic context.
2811          * Normally this will be called from the same CPU as cpu.
2812          * If not it's up to the caller to protect this.
2813          */
2814         atomic_inc(&cpu_buffer_a->record_disabled);
2815         atomic_inc(&cpu_buffer_b->record_disabled);
2816
2817         buffer_a->buffers[cpu] = cpu_buffer_b;
2818         buffer_b->buffers[cpu] = cpu_buffer_a;
2819
2820         cpu_buffer_b->buffer = buffer_a;
2821         cpu_buffer_a->buffer = buffer_b;
2822
2823         atomic_dec(&cpu_buffer_a->record_disabled);
2824         atomic_dec(&cpu_buffer_b->record_disabled);
2825
2826         ret = 0;
2827 out:
2828         return ret;
2829 }
2830 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
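
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * the "snapshot" pattern described above, using a freshly allocated spare
 * buffer of the same size. A real tracer would keep the spare around
 * instead of allocating it here.
 */
static int example_snapshot_cpu(struct ring_buffer *live, int cpu,
				struct ring_buffer **snap)
{
	struct ring_buffer *spare;
	int ret;

	/* same size, so the page counts match as ring_buffer_swap_cpu needs */
	spare = ring_buffer_alloc(ring_buffer_size(live), 0);
	if (!spare)
		return -ENOMEM;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret) {
		ring_buffer_free(spare);
		return ret;
	}

	*snap = spare;	/* now holds what was in the live CPU buffer */
	return 0;
}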
2831
2832 /**
2833  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2834  * @buffer: the buffer to allocate for.
2835  *
2836  * This function is used in conjunction with ring_buffer_read_page.
2837  * When reading a full page from the ring buffer, these functions
2838  * can be used to speed up the process. The calling function should
2839  * allocate a few pages first with this function. Then when it
2840  * needs to get pages from the ring buffer, it passes the result
2841  * of this function into ring_buffer_read_page, which will swap
2842  * the page that was allocated, with the read page of the buffer.
2843  *
2844  * Returns:
2845  *  The page allocated, or NULL on error.
2846  */
2847 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2848 {
2849         struct buffer_data_page *bpage;
2850         unsigned long addr;
2851
2852         addr = __get_free_page(GFP_KERNEL);
2853         if (!addr)
2854                 return NULL;
2855
2856         bpage = (void *)addr;
2857
2858         rb_init_page(bpage);
2859
2860         return bpage;
2861 }
2862 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
2863
2864 /**
2865  * ring_buffer_free_read_page - free an allocated read page
2866  * @buffer: the buffer the page was allocated for
2867  * @data: the page to free
2868  *
2869  * Free a page allocated from ring_buffer_alloc_read_page.
2870  */
2871 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2872 {
2873         free_page((unsigned long)data);
2874 }
2875 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2876
2877 /**
2878  * ring_buffer_read_page - extract a page from the ring buffer
2879  * @buffer: buffer to extract from
2880  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2881  * @len: amount to extract
2882  * @cpu: the cpu of the buffer to extract
2883  * @full: should the extraction only happen when the page is full.
2884  *
2885  * This function will pull out a page from the ring buffer and consume it.
2886  * @data_page must be the address of the variable holding the page returned
2887  * from ring_buffer_alloc_read_page. This is because the page might be used
2888  * to swap with a page in the ring buffer.
2889  *
2890  * for example:
2891  *      rpage = ring_buffer_alloc_read_page(buffer);
2892  *      if (!rpage)
2893  *              return error;
2894  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2895  *      if (ret >= 0)
2896  *              process_page(rpage, ret);
2897  *
2898  * When @full is set, the function will only succeed when the writer
2899  * is off the reader page.
2900  *
2901  * Note: it is up to the calling functions to handle sleeps and wakeups.
2902  *  The ring buffer can be used anywhere in the kernel and can not
2903  *  blindly call wake_up. The layer that uses the ring buffer must be
2904  *  responsible for that.
2905  *
2906  * Returns:
2907  *  >=0 if data has been transferred, returns the offset of consumed data.
2908  *  <0 if no data has been transferred.
2909  */
2910 int ring_buffer_read_page(struct ring_buffer *buffer,
2911                           void **data_page, size_t len, int cpu, int full)
2912 {
2913         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2914         struct ring_buffer_event *event;
2915         struct buffer_data_page *bpage;
2916         struct buffer_page *reader;
2917         unsigned long flags;
2918         unsigned int commit;
2919         unsigned int read;
2920         u64 save_timestamp;
2921         int ret = -1;
2922
2923         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2924                 goto out;
2925
2926         /*
2927          * If len is not big enough to hold the page header, then
2928          * we can not copy anything.
2929          */
2930         if (len <= BUF_PAGE_HDR_SIZE)
2931                 goto out;
2932
2933         len -= BUF_PAGE_HDR_SIZE;
2934
2935         if (!data_page)
2936                 goto out;
2937
2938         bpage = *data_page;
2939         if (!bpage)
2940                 goto out;
2941
2942         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2943
2944         reader = rb_get_reader_page(cpu_buffer);
2945         if (!reader)
2946                 goto out_unlock;
2947
2948         event = rb_reader_event(cpu_buffer);
2949
2950         read = reader->read;
2951         commit = rb_page_commit(reader);
2952
2953         /*
2954          * If this page has been partially read or
2955          * if len is not big enough to read the rest of the page or
2956          * a writer is still on the page, then
2957          * we must copy the data from the page to the buffer.
2958          * Otherwise, we can simply swap the page with the one passed in.
2959          */
2960         if (read || (len < (commit - read)) ||
2961             cpu_buffer->reader_page == cpu_buffer->commit_page) {
2962                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2963                 unsigned int rpos = read;
2964                 unsigned int pos = 0;
2965                 unsigned int size;
2966
2967                 if (full)
2968                         goto out_unlock;
2969
2970                 if (len > (commit - read))
2971                         len = (commit - read);
2972
2973                 size = rb_event_length(event);
2974
2975                 if (len < size)
2976                         goto out_unlock;
2977
2978                 /* save the current timestamp, since the user will need it */
2979                 save_timestamp = cpu_buffer->read_stamp;
2980
2981                 /* Need to copy one event at a time */
2982                 do {
2983                         memcpy(bpage->data + pos, rpage->data + rpos, size);
2984
2985                         len -= size;
2986
2987                         rb_advance_reader(cpu_buffer);
2988                         rpos = reader->read;
2989                         pos += size;
2990
2991                         event = rb_reader_event(cpu_buffer);
2992                         size = rb_event_length(event);
2993                 } while (len > size);
2994
2995                 /* update bpage */
2996                 local_set(&bpage->commit, pos);
2997                 bpage->time_stamp = save_timestamp;
2998
2999                 /* we copied everything to the beginning */
3000                 read = 0;
3001         } else {
3002                 /* update the entry counter */
3003                 cpu_buffer->read += local_read(&reader->entries);
3004
3005                 /* swap the pages */
3006                 rb_init_page(bpage);
3007                 bpage = reader->page;
3008                 reader->page = *data_page;
3009                 local_set(&reader->write, 0);
3010                 local_set(&reader->entries, 0);
3011                 reader->read = 0;
3012                 *data_page = bpage;
3013         }
3014         ret = read;
3015
3016  out_unlock:
3017         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3018
3019  out:
3020         return ret;
3021 }
3022 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
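
/*
 * Illustrative sketch (added by the editor, not part of the original file):
 * a fleshed-out version of the example in the comment above. The page is
 * passed by address because ring_buffer_read_page() may swap it with a page
 * from the buffer; what is done with the data is left as a comment.
 */
static int example_read_full_page(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/* valid data begins at offset "ret" within rpage */
	}

	ring_buffer_free_read_page(buffer, rpage);
	return ret;
}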
3023
3024 static ssize_t
3025 rb_simple_read(struct file *filp, char __user *ubuf,
3026                size_t cnt, loff_t *ppos)
3027 {
3028         unsigned long *p = filp->private_data;
3029         char buf[64];
3030         int r;
3031
3032         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3033                 r = sprintf(buf, "permanently disabled\n");
3034         else
3035                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3036
3037         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3038 }
3039
3040 static ssize_t
3041 rb_simple_write(struct file *filp, const char __user *ubuf,
3042                 size_t cnt, loff_t *ppos)
3043 {
3044         unsigned long *p = filp->private_data;
3045         char buf[64];
3046         unsigned long val;
3047         int ret;
3048
3049         if (cnt >= sizeof(buf))
3050                 return -EINVAL;
3051
3052         if (copy_from_user(&buf, ubuf, cnt))
3053                 return -EFAULT;
3054
3055         buf[cnt] = 0;
3056
3057         ret = strict_strtoul(buf, 10, &val);
3058         if (ret < 0)
3059                 return ret;
3060
3061         if (val)
3062                 set_bit(RB_BUFFERS_ON_BIT, p);
3063         else
3064                 clear_bit(RB_BUFFERS_ON_BIT, p);
3065
3066         (*ppos)++;
3067
3068         return cnt;
3069 }
3070
3071 static const struct file_operations rb_simple_fops = {
3072         .open           = tracing_open_generic,
3073         .read           = rb_simple_read,
3074         .write          = rb_simple_write,
3075 };
3076
3077
3078 static __init int rb_init_debugfs(void)
3079 {
3080         struct dentry *d_tracer;
3081
3082         d_tracer = tracing_init_dentry();
3083
3084         trace_create_file("tracing_on", 0644, d_tracer,
3085                             &ring_buffer_flags, &rb_simple_fops);
3086
3087         return 0;
3088 }
3089
3090 fs_initcall(rb_init_debugfs);
3091
3092 #ifdef CONFIG_HOTPLUG_CPU
3093 static int rb_cpu_notify(struct notifier_block *self,
3094                          unsigned long action, void *hcpu)
3095 {
3096         struct ring_buffer *buffer =
3097                 container_of(self, struct ring_buffer, cpu_notify);
3098         long cpu = (long)hcpu;
3099
3100         switch (action) {
3101         case CPU_UP_PREPARE:
3102         case CPU_UP_PREPARE_FROZEN:
3103                 if (cpu_isset(cpu, *buffer->cpumask))
3104                         return NOTIFY_OK;
3105
3106                 buffer->buffers[cpu] =
3107                         rb_allocate_cpu_buffer(buffer, cpu);
3108                 if (!buffer->buffers[cpu]) {
3109                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3110                              cpu);
3111                         return NOTIFY_OK;
3112                 }
3113                 smp_wmb();
3114                 cpu_set(cpu, *buffer->cpumask);
3115                 break;
3116         case CPU_DOWN_PREPARE:
3117         case CPU_DOWN_PREPARE_FROZEN:
3118                 /*
3119                  * Do nothing.
3120                  *  If we were to free the buffer, then the user would
3121                  *  lose any trace that was in the buffer.
3122                  */
3123                 break;
3124         default:
3125                 break;
3126         }
3127         return NOTIFY_OK;
3128 }
3129 #endif