ring-buffer: fix dangling commit race
[safe/jmp/linux-2.6] / kernel / trace / ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /*
22  * A fast way to enable or disable all ring buffers is to
23  * call tracing_on or tracing_off. Turning off the ring buffers
24  * prevents all ring buffers from being recorded to.
26  * Turning this switch on makes it OK to write to the
27  * ring buffer, if the ring buffer itself is enabled.
27  *
28  * There are three layers that must be on in order to write
29  * to the ring buffer.
30  *
31  * 1) This global flag must be set.
32  * 2) The ring buffer must be enabled for recording.
33  * 3) The per cpu buffer must be enabled for recording.
34  *
35  * In case of an anomaly, this global flag has a bit set that
36  * will permanently disable all ring buffers.
37  */
38
39 /*
40  * Global flag to disable all recording to ring buffers
41  *  This has two bits: ON, DISABLED
42  *
43  *  ON   DISABLED
44  * ---- ----------
45  *   0      0        : ring buffers are off
46  *   1      0        : ring buffers are on
47  *   X      1        : ring buffers are permanently disabled
48  */
49
50 enum {
51         RB_BUFFERS_ON_BIT       = 0,
52         RB_BUFFERS_DISABLED_BIT = 1,
53 };
54
55 enum {
56         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
57         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
58 };
59
60 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
61
62 /**
63  * tracing_on - enable all tracing buffers
64  *
65  * This function enables all tracing buffers that may have been
66  * disabled with tracing_off.
67  */
68 void tracing_on(void)
69 {
70         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
71 }
72
73 /**
74  * tracing_off - turn off all tracing buffers
75  *
76  * This function stops all tracing buffers from recording data.
77  * It does not disable any overhead the tracers themselves may
78  * be causing. This function simply causes all recording to
79  * the ring buffers to fail.
80  */
81 void tracing_off(void)
82 {
83         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
84 }
85
86 /**
87  * tracing_off_permanent - permanently disable ring buffers
88  *
89  * This function, once called, will disable all ring buffers
90  * permanently.
91  */
92 void tracing_off_permanent(void)
93 {
94         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
95 }
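
/*
 * Example (illustrative sketch, not part of the original file): a
 * debugging hook might stop all recording when a serious error is
 * detected, so the buffers preserve the events leading up to it:
 *
 *        if (serious_error)              /* hypothetical condition */
 *                tracing_off_permanent();
 *        else if (transient_error)
 *                tracing_off();          /* undone later by tracing_on() */
 */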
96
99 /* Up this if you want to test the TIME_EXTEND events and normalization */
100 #define DEBUG_SHIFT 0
101
102 /* FIXME!!! */
103 u64 ring_buffer_time_stamp(int cpu)
104 {
105         u64 time;
106
107         preempt_disable_notrace();
108         /* shift to debug/test normalization and TIME_EXTEND events */
109         time = sched_clock() << DEBUG_SHIFT;
110         preempt_enable_no_resched_notrace();
111
112         return time;
113 }
114
115 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
116 {
117         /* Just stupid testing the normalize function and deltas */
118         *ts >>= DEBUG_SHIFT;
119 }
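
/*
 * For example (a sketch): building with DEBUG_SHIFT set to 10 inflates
 * every time stamp 1024x in ring_buffer_time_stamp() and divides it
 * back out here, which exercises the TIME_EXTEND and normalization
 * paths far more often than a normal clock would.
 */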
120
121 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122 #define RB_ALIGNMENT_SHIFT      2
123 #define RB_ALIGNMENT            (1 << RB_ALIGNMENT_SHIFT)
124 #define RB_MAX_SMALL_DATA       28
125
126 enum {
127         RB_LEN_TIME_EXTEND = 8,
128         RB_LEN_TIME_STAMP = 16,
129 };
130
131 /* inline for ring buffer fast paths */
132 static inline unsigned
133 rb_event_length(struct ring_buffer_event *event)
134 {
135         unsigned length;
136
137         switch (event->type) {
138         case RINGBUF_TYPE_PADDING:
139                 /* undefined */
140                 return -1;
141
142         case RINGBUF_TYPE_TIME_EXTEND:
143                 return RB_LEN_TIME_EXTEND;
144
145         case RINGBUF_TYPE_TIME_STAMP:
146                 return RB_LEN_TIME_STAMP;
147
148         case RINGBUF_TYPE_DATA:
149                 if (event->len)
150                         length = event->len << RB_ALIGNMENT_SHIFT;
151                 else
152                         length = event->array[0];
153                 return length + RB_EVNT_HDR_SIZE;
154         default:
155                 BUG();
156         }
157         /* not hit */
158         return 0;
159 }
160
161 /**
162  * ring_buffer_event_length - return the length of the event
163  * @event: the event to get the length of
164  */
165 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
166 {
167         return rb_event_length(event);
168 }
169
170 /* inline for ring buffer fast paths */
171 static inline void *
172 rb_event_data(struct ring_buffer_event *event)
173 {
174         BUG_ON(event->type != RINGBUF_TYPE_DATA);
175         /* If length is in len field, then array[0] has the data */
176         if (event->len)
177                 return (void *)&event->array[0];
178         /* Otherwise length is in array[0] and array[1] has the data */
179         return (void *)&event->array[1];
180 }
181
182 /**
183  * ring_buffer_event_data - return the data of the event
184  * @event: the event to get the data from
185  */
186 void *ring_buffer_event_data(struct ring_buffer_event *event)
187 {
188         return rb_event_data(event);
189 }
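
/*
 * Example (sketch): a consumer that has obtained an event from a read
 * interface can locate its payload and total size like this:
 *
 *        unsigned len = ring_buffer_event_length(event);
 *        void *body   = ring_buffer_event_data(event);
 *
 * Note that for data events the returned length includes the event
 * header, not just the payload.
 */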
190
191 #define for_each_buffer_cpu(buffer, cpu)                \
192         for_each_cpu_mask(cpu, buffer->cpumask)
193
194 #define TS_SHIFT        27
195 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
196 #define TS_DELTA_TEST   (~TS_MASK)
197
198 struct buffer_data_page {
199         u64              time_stamp;    /* page time stamp */
200         local_t          commit;        /* write committed index */
201         unsigned char    data[];        /* data of buffer page */
202 };
203
204 struct buffer_page {
205         local_t          write;         /* index for next write */
206         unsigned         read;          /* index for next read */
207         struct list_head list;          /* list of free pages */
208         struct buffer_data_page *page;  /* Actual data page */
209 };
210
211 static void rb_init_page(struct buffer_data_page *bpage)
212 {
213         local_set(&bpage->commit, 0);
214 }
215
216 /*
217  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
218  * this issue out.
219  */
220 static inline void free_buffer_page(struct buffer_page *bpage)
221 {
222         if (bpage->page)
223                 free_page((unsigned long)bpage->page);
224         kfree(bpage);
225 }
226
227 /*
228  * We need to fit the time_stamp delta into 27 bits.
229  */
230 static inline int test_time_stamp(u64 delta)
231 {
232         if (delta & TS_DELTA_TEST)
233                 return 1;
234         return 0;
235 }
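
/*
 * Example: with TS_SHIFT = 27, an inline delta can hold at most
 * 2^27 - 1 counts (about 134ms at 1ns per count); any larger delta
 * makes test_time_stamp() return 1 and forces a TIME_EXTEND event.
 */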
236
237 #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
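
/*
 * For example, with 4K pages and an 8-byte local_t, the header is
 * 8 (time_stamp) + 8 (commit) = 16 bytes, leaving 4080 bytes of event
 * data per page (the exact figure depends on the architecture's
 * local_t and any padding).
 */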
238
239 /*
240  * head_page == tail_page && head == tail then buffer is empty.
241  */
242 struct ring_buffer_per_cpu {
243         int                             cpu;
244         struct ring_buffer              *buffer;
245         spinlock_t                      reader_lock; /* serialize readers */
246         raw_spinlock_t                  lock;
247         struct lock_class_key           lock_key;
248         struct list_head                pages;
249         struct buffer_page              *head_page;     /* read from head */
250         struct buffer_page              *tail_page;     /* write to tail */
251         struct buffer_page              *commit_page;   /* committed pages */
252         struct buffer_page              *reader_page;
253         unsigned long                   overrun;
254         unsigned long                   entries;
255         u64                             write_stamp;
256         u64                             read_stamp;
257         atomic_t                        record_disabled;
258 };
259
260 struct ring_buffer {
261         unsigned                        pages;
262         unsigned                        flags;
263         int                             cpus;
264         cpumask_t                       cpumask;
265         atomic_t                        record_disabled;
266
267         struct mutex                    mutex;
268
269         struct ring_buffer_per_cpu      **buffers;
270 };
271
272 struct ring_buffer_iter {
273         struct ring_buffer_per_cpu      *cpu_buffer;
274         unsigned long                   head;
275         struct buffer_page              *head_page;
276         u64                             read_stamp;
277 };
278
279 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
280 #define RB_WARN_ON(buffer, cond)                                \
281         ({                                                      \
282                 int _____ret = unlikely(cond);                  \
283                 if (_____ret) {                                 \
284                         atomic_inc(&buffer->record_disabled);   \
285                         WARN_ON(1);                             \
286                 }                                               \
287                 _____ret;                                       \
288         })
289
290 /**
291  * rb_check_pages - integrity check of buffer pages
292  * @cpu_buffer: CPU buffer with pages to test
293  *
294  * As a safety measure we check to make sure the data pages have not
295  * been corrupted.
296  */
297 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
298 {
299         struct list_head *head = &cpu_buffer->pages;
300         struct buffer_page *bpage, *tmp;
301
302         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
303                 return -1;
304         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
305                 return -1;
306
307         list_for_each_entry_safe(bpage, tmp, head, list) {
308                 if (RB_WARN_ON(cpu_buffer,
309                                bpage->list.next->prev != &bpage->list))
310                         return -1;
311                 if (RB_WARN_ON(cpu_buffer,
312                                bpage->list.prev->next != &bpage->list))
313                         return -1;
314         }
315
316         return 0;
317 }
318
319 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
320                              unsigned nr_pages)
321 {
322         struct list_head *head = &cpu_buffer->pages;
323         struct buffer_page *bpage, *tmp;
324         unsigned long addr;
325         LIST_HEAD(pages);
326         unsigned i;
327
328         for (i = 0; i < nr_pages; i++) {
329                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
330                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
331                 if (!bpage)
332                         goto free_pages;
333                 list_add(&bpage->list, &pages);
334
335                 addr = __get_free_page(GFP_KERNEL);
336                 if (!addr)
337                         goto free_pages;
338                 bpage->page = (void *)addr;
339                 rb_init_page(bpage->page);
340         }
341
342         list_splice(&pages, head);
343
344         rb_check_pages(cpu_buffer);
345
346         return 0;
347
348  free_pages:
349         list_for_each_entry_safe(bpage, tmp, &pages, list) {
350                 list_del_init(&bpage->list);
351                 free_buffer_page(bpage);
352         }
353         return -ENOMEM;
354 }
355
356 static struct ring_buffer_per_cpu *
357 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
358 {
359         struct ring_buffer_per_cpu *cpu_buffer;
360         struct buffer_page *bpage;
361         unsigned long addr;
362         int ret;
363
364         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
365                                   GFP_KERNEL, cpu_to_node(cpu));
366         if (!cpu_buffer)
367                 return NULL;
368
369         cpu_buffer->cpu = cpu;
370         cpu_buffer->buffer = buffer;
371         spin_lock_init(&cpu_buffer->reader_lock);
372         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
373         INIT_LIST_HEAD(&cpu_buffer->pages);
374
375         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
376                             GFP_KERNEL, cpu_to_node(cpu));
377         if (!bpage)
378                 goto fail_free_buffer;
379
380         cpu_buffer->reader_page = bpage;
381         addr = __get_free_page(GFP_KERNEL);
382         if (!addr)
383                 goto fail_free_reader;
384         bpage->page = (void *)addr;
385         rb_init_page(bpage->page);
386
387         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
388
389         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
390         if (ret < 0)
391                 goto fail_free_reader;
392
393         cpu_buffer->head_page
394                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
395         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
396
397         return cpu_buffer;
398
399  fail_free_reader:
400         free_buffer_page(cpu_buffer->reader_page);
401
402  fail_free_buffer:
403         kfree(cpu_buffer);
404         return NULL;
405 }
406
407 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
408 {
409         struct list_head *head = &cpu_buffer->pages;
410         struct buffer_page *bpage, *tmp;
411
412         list_del_init(&cpu_buffer->reader_page->list);
413         free_buffer_page(cpu_buffer->reader_page);
414
415         list_for_each_entry_safe(bpage, tmp, head, list) {
416                 list_del_init(&bpage->list);
417                 free_buffer_page(bpage);
418         }
419         kfree(cpu_buffer);
420 }
421
422 /*
423  * Causes compile errors if the struct buffer_page gets bigger
424  * than the struct page.
425  */
426 extern int ring_buffer_page_too_big(void);
427
428 /**
429  * ring_buffer_alloc - allocate a new ring_buffer
430  * @size: the size in bytes that is needed.
431  * @flags: attributes to set for the ring buffer.
432  *
433  * Currently the only flag that is available is the RB_FL_OVERWRITE
434  * flag. This flag means that the buffer will overwrite old data
435  * when the buffer wraps. If this flag is not set, the buffer will
436  * drop data when the tail hits the head.
437  */
438 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
439 {
440         struct ring_buffer *buffer;
441         int bsize;
442         int cpu;
443
444         /* Paranoid! Optimizes out when all is well */
445         if (sizeof(struct buffer_page) > sizeof(struct page))
446                 ring_buffer_page_too_big();
447
448
449         /* keep it in its own cache line */
450         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
451                          GFP_KERNEL);
452         if (!buffer)
453                 return NULL;
454
455         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
456         buffer->flags = flags;
457
458         /* need at least two pages */
459         if (buffer->pages == 1)
460                 buffer->pages++;
461
462         buffer->cpumask = cpu_possible_map;
463         buffer->cpus = nr_cpu_ids;
464
465         bsize = sizeof(void *) * nr_cpu_ids;
466         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
467                                   GFP_KERNEL);
468         if (!buffer->buffers)
469                 goto fail_free_buffer;
470
471         for_each_buffer_cpu(buffer, cpu) {
472                 buffer->buffers[cpu] =
473                         rb_allocate_cpu_buffer(buffer, cpu);
474                 if (!buffer->buffers[cpu])
475                         goto fail_free_buffers;
476         }
477
478         mutex_init(&buffer->mutex);
479
480         return buffer;
481
482  fail_free_buffers:
483         for_each_buffer_cpu(buffer, cpu) {
484                 if (buffer->buffers[cpu])
485                         rb_free_cpu_buffer(buffer->buffers[cpu]);
486         }
487         kfree(buffer->buffers);
488
489  fail_free_buffer:
490         kfree(buffer);
491         return NULL;
492 }
493
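/*
 * Example usage (sketch; error handling beyond the allocation check
 * is elided):
 *
 *        struct ring_buffer *rb;
 *
 *        rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *        if (!rb)
 *                return -ENOMEM;
 *        ...
 *        ring_buffer_free(rb);
 */
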
494 /**
495  * ring_buffer_free - free a ring buffer.
496  * @buffer: the buffer to free.
497  */
498 void
499 ring_buffer_free(struct ring_buffer *buffer)
500 {
501         int cpu;
502
503         for_each_buffer_cpu(buffer, cpu)
504                 rb_free_cpu_buffer(buffer->buffers[cpu]);
505
506         kfree(buffer);
507 }
508
509 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
510
511 static void
512 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
513 {
514         struct buffer_page *bpage;
515         struct list_head *p;
516         unsigned i;
517
518         atomic_inc(&cpu_buffer->record_disabled);
519         synchronize_sched();
520
521         for (i = 0; i < nr_pages; i++) {
522                 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
523                         return;
524                 p = cpu_buffer->pages.next;
525                 bpage = list_entry(p, struct buffer_page, list);
526                 list_del_init(&bpage->list);
527                 free_buffer_page(bpage);
528         }
529         if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
530                 return;
531
532         rb_reset_cpu(cpu_buffer);
533
534         rb_check_pages(cpu_buffer);
535
536         atomic_dec(&cpu_buffer->record_disabled);
537
538 }
539
540 static void
541 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
542                 struct list_head *pages, unsigned nr_pages)
543 {
544         struct buffer_page *bpage;
545         struct list_head *p;
546         unsigned i;
547
548         atomic_inc(&cpu_buffer->record_disabled);
549         synchronize_sched();
550
551         for (i = 0; i < nr_pages; i++) {
552                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
553                         return;
554                 p = pages->next;
555                 bpage = list_entry(p, struct buffer_page, list);
556                 list_del_init(&bpage->list);
557                 list_add_tail(&bpage->list, &cpu_buffer->pages);
558         }
559         rb_reset_cpu(cpu_buffer);
560
561         rb_check_pages(cpu_buffer);
562
563         atomic_dec(&cpu_buffer->record_disabled);
564 }
565
566 /**
567  * ring_buffer_resize - resize the ring buffer
568  * @buffer: the buffer to resize.
569  * @size: the new size.
570  *
571  * The tracer is responsible for making sure that the buffer is
572  * not being used while changing the size.
573  * Note: We may be able to change the above requirement by using
574  *  RCU synchronizations.
575  *
576  * Minimum size is 2 * BUF_PAGE_SIZE.
577  *
578  * Returns -1 on failure.
579  */
580 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
581 {
582         struct ring_buffer_per_cpu *cpu_buffer;
583         unsigned nr_pages, rm_pages, new_pages;
584         struct buffer_page *bpage, *tmp;
585         unsigned long buffer_size;
586         unsigned long addr;
587         LIST_HEAD(pages);
588         int i, cpu;
589
590         /*
591          * Always succeed at resizing a non-existent buffer:
592          */
593         if (!buffer)
594                 return size;
595
596         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
597         size *= BUF_PAGE_SIZE;
598         buffer_size = buffer->pages * BUF_PAGE_SIZE;
599
600         /* we need a minimum of two pages */
601         if (size < BUF_PAGE_SIZE * 2)
602                 size = BUF_PAGE_SIZE * 2;
603
604         if (size == buffer_size)
605                 return size;
606
607         mutex_lock(&buffer->mutex);
608
609         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
610
611         if (size < buffer_size) {
612
613                 /* easy case, just free pages */
614                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
615                         mutex_unlock(&buffer->mutex);
616                         return -1;
617                 }
618
619                 rm_pages = buffer->pages - nr_pages;
620
621                 for_each_buffer_cpu(buffer, cpu) {
622                         cpu_buffer = buffer->buffers[cpu];
623                         rb_remove_pages(cpu_buffer, rm_pages);
624                 }
625                 goto out;
626         }
627
628         /*
629          * This is a bit more difficult. We only want to add pages
630          * when we can allocate enough for all CPUs. We do this
631          * by allocating all the pages and storing them on a local
632          * link list. If we succeed in our allocation, then we
633          * add these pages to the cpu_buffers. Otherwise we just free
634          * them all and return -ENOMEM;
635          */
636         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
637                 mutex_unlock(&buffer->mutex);
638                 return -1;
639         }
640
641         new_pages = nr_pages - buffer->pages;
642
643         for_each_buffer_cpu(buffer, cpu) {
644                 for (i = 0; i < new_pages; i++) {
645                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
646                                                   cache_line_size()),
647                                             GFP_KERNEL, cpu_to_node(cpu));
648                         if (!bpage)
649                                 goto free_pages;
650                         list_add(&bpage->list, &pages);
651                         addr = __get_free_page(GFP_KERNEL);
652                         if (!addr)
653                                 goto free_pages;
654                         bpage->page = (void *)addr;
655                         rb_init_page(bpage->page);
656                 }
657         }
658
659         for_each_buffer_cpu(buffer, cpu) {
660                 cpu_buffer = buffer->buffers[cpu];
661                 rb_insert_pages(cpu_buffer, &pages, new_pages);
662         }
663
664         if (RB_WARN_ON(buffer, !list_empty(&pages))) {
665                 mutex_unlock(&buffer->mutex);
666                 return -1;
667         }
668
669  out:
670         buffer->pages = nr_pages;
671         mutex_unlock(&buffer->mutex);
672
673         return size;
674
675  free_pages:
676         list_for_each_entry_safe(bpage, tmp, &pages, list) {
677                 list_del_init(&bpage->list);
678                 free_buffer_page(bpage);
679         }
680         mutex_unlock(&buffer->mutex);
681         return -ENOMEM;
682 }
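
/*
 * Example (sketch): requested sizes are rounded up to whole pages and
 * clamped to the two-page minimum, so on a 4K-page system
 * ring_buffer_resize(rb, 1000) grows or shrinks each CPU buffer to
 * 2 * BUF_PAGE_SIZE bytes and returns that size.
 */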
683
684 static inline int rb_null_event(struct ring_buffer_event *event)
685 {
686         return event->type == RINGBUF_TYPE_PADDING;
687 }
688
689 static inline void *
690 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
691 {
692         return bpage->data + index;
693 }
694
695 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
696 {
697         return bpage->page->data + index;
698 }
699
700 static inline struct ring_buffer_event *
701 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
702 {
703         return __rb_page_index(cpu_buffer->reader_page,
704                                cpu_buffer->reader_page->read);
705 }
706
707 static inline struct ring_buffer_event *
708 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
709 {
710         return __rb_page_index(cpu_buffer->head_page,
711                                cpu_buffer->head_page->read);
712 }
713
714 static inline struct ring_buffer_event *
715 rb_iter_head_event(struct ring_buffer_iter *iter)
716 {
717         return __rb_page_index(iter->head_page, iter->head);
718 }
719
720 static inline unsigned rb_page_write(struct buffer_page *bpage)
721 {
722         return local_read(&bpage->write);
723 }
724
725 static inline unsigned rb_page_commit(struct buffer_page *bpage)
726 {
727         return local_read(&bpage->page->commit);
728 }
729
730 /* Size is determined by what has been committed */
731 static inline unsigned rb_page_size(struct buffer_page *bpage)
732 {
733         return rb_page_commit(bpage);
734 }
735
736 static inline unsigned
737 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
738 {
739         return rb_page_commit(cpu_buffer->commit_page);
740 }
741
742 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
743 {
744         return rb_page_commit(cpu_buffer->head_page);
745 }
746
747 /*
748  * When the tail hits the head and the buffer is in overwrite mode,
749  * the head jumps to the next page and all content on the previous
750  * page is discarded. But before doing so, we update the overrun
751  * variable of the buffer.
752  */
753 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
754 {
755         struct ring_buffer_event *event;
756         unsigned long head;
757
758         for (head = 0; head < rb_head_size(cpu_buffer);
759              head += rb_event_length(event)) {
760
761                 event = __rb_page_index(cpu_buffer->head_page, head);
762                 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
763                         return;
764                 /* Only count data entries */
765                 if (event->type != RINGBUF_TYPE_DATA)
766                         continue;
767                 cpu_buffer->overrun++;
768                 cpu_buffer->entries--;
769         }
770 }
771
772 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
773                                struct buffer_page **bpage)
774 {
775         struct list_head *p = (*bpage)->list.next;
776
777         if (p == &cpu_buffer->pages)
778                 p = p->next;
779
780         *bpage = list_entry(p, struct buffer_page, list);
781 }
782
783 static inline unsigned
784 rb_event_index(struct ring_buffer_event *event)
785 {
786         unsigned long addr = (unsigned long)event;
787
788         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
789 }
790
791 static inline int
792 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
793              struct ring_buffer_event *event)
794 {
795         unsigned long addr = (unsigned long)event;
796         unsigned long index;
797
798         index = rb_event_index(event);
799         addr &= PAGE_MASK;
800
801         return cpu_buffer->commit_page->page == (void *)addr &&
802                 rb_commit_index(cpu_buffer) == index;
803 }
804
805 static inline void
806 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
807                     struct ring_buffer_event *event)
808 {
809         unsigned long addr = (unsigned long)event;
810         unsigned long index;
811
812         index = rb_event_index(event);
813         addr &= PAGE_MASK;
814
815         while (cpu_buffer->commit_page->page != (void *)addr) {
816                 if (RB_WARN_ON(cpu_buffer,
817                           cpu_buffer->commit_page == cpu_buffer->tail_page))
818                         return;
819                 cpu_buffer->commit_page->page->commit =
820                         cpu_buffer->commit_page->write;
821                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
822                 cpu_buffer->write_stamp =
823                         cpu_buffer->commit_page->page->time_stamp;
824         }
825
826         /* Now set the commit to the event's index */
827         local_set(&cpu_buffer->commit_page->page->commit, index);
828 }
829
830 static inline void
831 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
832 {
833         /*
834          * We only race with interrupts and NMIs on this CPU.
835          * If we own the commit event, then we can commit
836          * all others that interrupted us, since the interruptions
837          * are in stack format (they finish before they come
838          * back to us). This allows us to do a simple loop to
839          * assign the commit to the tail.
840          */
841  again:
842         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
843                 cpu_buffer->commit_page->page->commit =
844                         cpu_buffer->commit_page->write;
845                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
846                 cpu_buffer->write_stamp =
847                         cpu_buffer->commit_page->page->time_stamp;
848                 /* add barrier to keep gcc from optimizing too much */
849                 barrier();
850         }
851         while (rb_commit_index(cpu_buffer) !=
852                rb_page_write(cpu_buffer->commit_page)) {
853                 cpu_buffer->commit_page->page->commit =
854                         cpu_buffer->commit_page->write;
855                 barrier();
856         }
857
858         /* again, keep gcc from optimizing */
859         barrier();
860
861         /*
862          * If an interrupt came in just after the first while loop
863          * and pushed the tail page forward, we will be left with
864          * a dangling commit that will never go forward.
865          */
866         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
867                 goto again;
868 }
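
/*
 * Concrete interleaving behind the re-check above (illustrative):
 * this CPU finishes the first loop with commit_page == tail_page;
 * an interrupt then reserves an event that pushes tail_page to the
 * next page and returns. Without the final test we would return with
 * commit_page trailing tail_page forever, a dangling commit that
 * never advances.
 */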
869
870 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
871 {
872         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
873         cpu_buffer->reader_page->read = 0;
874 }
875
876 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
877 {
878         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
879
880         /*
881          * The iterator could be on the reader page (it starts there).
882          * But the head could have moved, since the reader was
883          * found. Check for this case and assign the iterator
884          * to the head page instead of next.
885          */
886         if (iter->head_page == cpu_buffer->reader_page)
887                 iter->head_page = cpu_buffer->head_page;
888         else
889                 rb_inc_page(cpu_buffer, &iter->head_page);
890
891         iter->read_stamp = iter->head_page->page->time_stamp;
892         iter->head = 0;
893 }
894
895 /**
896  * ring_buffer_update_event - update event type and data
897  * @event: the event to update
898  * @type: the type of event
899  * @length: the size of the event field in the ring buffer
900  *
901  * Update the type and data fields of the event. The length
902  * is the actual size that is written to the ring buffer,
903  * and with this, we can determine what to place into the
904  * data field.
905  */
906 static inline void
907 rb_update_event(struct ring_buffer_event *event,
908                          unsigned type, unsigned length)
909 {
910         event->type = type;
911
912         switch (type) {
913
914         case RINGBUF_TYPE_PADDING:
915                 break;
916
917         case RINGBUF_TYPE_TIME_EXTEND:
918                 event->len =
919                         (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
920                         >> RB_ALIGNMENT_SHIFT;
921                 break;
922
923         case RINGBUF_TYPE_TIME_STAMP:
924                 event->len =
925                         (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
926                         >> RB_ALIGNMENT_SHIFT;
927                 break;
928
929         case RINGBUF_TYPE_DATA:
930                 length -= RB_EVNT_HDR_SIZE;
931                 if (length > RB_MAX_SMALL_DATA) {
932                         event->len = 0;
933                         event->array[0] = length;
934                 } else
935                         event->len =
936                                 (length + (RB_ALIGNMENT-1))
937                                 >> RB_ALIGNMENT_SHIFT;
938                 break;
939         default:
940                 BUG();
941         }
942 }
943
944 static inline unsigned rb_calculate_event_length(unsigned length)
945 {
946         struct ring_buffer_event event; /* Used only for sizeof array */
947
948         /* zero length can cause confusion */
949         if (!length)
950                 length = 1;
951
952         if (length > RB_MAX_SMALL_DATA)
953                 length += sizeof(event.array[0]);
954
955         length += RB_EVNT_HDR_SIZE;
956         length = ALIGN(length, RB_ALIGNMENT);
957
958         return length;
959 }
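
/*
 * Worked example (assuming the 4-byte event header of this era, with
 * RB_ALIGNMENT = 4): a 10-byte payload becomes 10 + 4 = 14, aligned
 * up to 16; a 100-byte payload exceeds RB_MAX_SMALL_DATA (28), so 4
 * bytes for array[0] are added first: 100 + 4 + 4 = 108, already
 * aligned.
 */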
960
961 static struct ring_buffer_event *
962 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
963                   unsigned type, unsigned long length, u64 *ts)
964 {
965         struct buffer_page *tail_page, *head_page, *reader_page;
966         unsigned long tail, write;
967         struct ring_buffer *buffer = cpu_buffer->buffer;
968         struct ring_buffer_event *event;
969         unsigned long flags;
970
971         tail_page = cpu_buffer->tail_page;
972         write = local_add_return(length, &tail_page->write);
973         tail = write - length;
974
975         /* See if we shot past the end of this buffer page */
976         if (write > BUF_PAGE_SIZE) {
977                 struct buffer_page *next_page = tail_page;
978
979                 local_irq_save(flags);
980                 __raw_spin_lock(&cpu_buffer->lock);
981
982                 rb_inc_page(cpu_buffer, &next_page);
983
984                 head_page = cpu_buffer->head_page;
985                 reader_page = cpu_buffer->reader_page;
986
987                 /* we grabbed the lock before incrementing */
988                 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
989                         goto out_unlock;
990
991                 /*
992                  * If for some reason, we had an interrupt storm that made
993                  * it all the way around the buffer, bail, and warn
994                  * about it.
995                  */
996                 if (unlikely(next_page == cpu_buffer->commit_page)) {
997                         WARN_ON_ONCE(1);
998                         goto out_unlock;
999                 }
1000
1001                 if (next_page == head_page) {
1002                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
1003                                 /* reset write */
1004                                 if (tail <= BUF_PAGE_SIZE)
1005                                         local_set(&tail_page->write, tail);
1006                                 goto out_unlock;
1007                         }
1008
1009                         /* tail_page has not moved yet? */
1010                         if (tail_page == cpu_buffer->tail_page) {
1011                                 /* count overflows */
1012                                 rb_update_overflow(cpu_buffer);
1013
1014                                 rb_inc_page(cpu_buffer, &head_page);
1015                                 cpu_buffer->head_page = head_page;
1016                                 cpu_buffer->head_page->read = 0;
1017                         }
1018                 }
1019
1020                 /*
1021                  * If the tail page is still the same as what we think
1022                  * it is, then it is up to us to update the tail
1023                  * pointer.
1024                  */
1025                 if (tail_page == cpu_buffer->tail_page) {
1026                         local_set(&next_page->write, 0);
1027                         local_set(&next_page->page->commit, 0);
1028                         cpu_buffer->tail_page = next_page;
1029
1030                         /* reread the time stamp */
1031                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1032                         cpu_buffer->tail_page->page->time_stamp = *ts;
1033                 }
1034
1035                 /*
1036                  * The actual tail page has moved forward.
1037                  */
1038                 if (tail < BUF_PAGE_SIZE) {
1039                         /* Mark the rest of the page with padding */
1040                         event = __rb_page_index(tail_page, tail);
1041                         event->type = RINGBUF_TYPE_PADDING;
1042                 }
1043
1044                 if (tail <= BUF_PAGE_SIZE)
1045                         /* Set the write back to the previous setting */
1046                         local_set(&tail_page->write, tail);
1047
1048                 /*
1049                  * If this was a commit entry that failed,
1050                  * advance the commit pointer too
1051                  */
1052                 if (tail_page == cpu_buffer->commit_page &&
1053                     tail == rb_commit_index(cpu_buffer)) {
1054                         rb_set_commit_to_write(cpu_buffer);
1055                 }
1056
1057                 __raw_spin_unlock(&cpu_buffer->lock);
1058                 local_irq_restore(flags);
1059
1060                 /* fail and let the caller try again */
1061                 return ERR_PTR(-EAGAIN);
1062         }
1063
1064         /* We reserved something on the buffer */
1065
1066         if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1067                 return NULL;
1068
1069         event = __rb_page_index(tail_page, tail);
1070         rb_update_event(event, type, length);
1071
1072         /*
1073          * If this is a commit and the tail is zero, then update
1074          * this page's time stamp.
1075          */
1076         if (!tail && rb_is_commit(cpu_buffer, event))
1077                 cpu_buffer->commit_page->page->time_stamp = *ts;
1078
1079         return event;
1080
1081  out_unlock:
1082         __raw_spin_unlock(&cpu_buffer->lock);
1083         local_irq_restore(flags);
1084         return NULL;
1085 }
1086
1087 static int
1088 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1089                   u64 *ts, u64 *delta)
1090 {
1091         struct ring_buffer_event *event;
1092         static int once;
1093         int ret;
1094
1095         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1096                 printk(KERN_WARNING "Delta way too big! %llu"
1097                        " ts=%llu write stamp = %llu\n",
1098                        (unsigned long long)*delta,
1099                        (unsigned long long)*ts,
1100                        (unsigned long long)cpu_buffer->write_stamp);
1101                 WARN_ON(1);
1102         }
1103
1104         /*
1105          * The delta is too big; we need to add a
1106          * new timestamp.
1107          */
1108         event = __rb_reserve_next(cpu_buffer,
1109                                   RINGBUF_TYPE_TIME_EXTEND,
1110                                   RB_LEN_TIME_EXTEND,
1111                                   ts);
1112         if (!event)
1113                 return -EBUSY;
1114
1115         if (PTR_ERR(event) == -EAGAIN)
1116                 return -EAGAIN;
1117
1118         /* Only a committed time event can update the write stamp */
1119         if (rb_is_commit(cpu_buffer, event)) {
1120                 /*
1121                  * If this is the first on the page, then we need to
1122                  * update the page itself, and just put in a zero.
1123                  */
1124                 if (rb_event_index(event)) {
1125                         event->time_delta = *delta & TS_MASK;
1126                         event->array[0] = *delta >> TS_SHIFT;
1127                 } else {
1128                         cpu_buffer->commit_page->page->time_stamp = *ts;
1129                         event->time_delta = 0;
1130                         event->array[0] = 0;
1131                 }
1132                 cpu_buffer->write_stamp = *ts;
1133                 /* let the caller know this was the commit */
1134                 ret = 1;
1135         } else {
1136                 /* Darn, this is just wasted space */
1137                 event->time_delta = 0;
1138                 event->array[0] = 0;
1139                 ret = 0;
1140         }
1141
1142         *delta = 0;
1143
1144         return ret;
1145 }
1146
1147 static struct ring_buffer_event *
1148 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1149                       unsigned type, unsigned long length)
1150 {
1151         struct ring_buffer_event *event;
1152         u64 ts, delta;
1153         int commit = 0;
1154         int nr_loops = 0;
1155
1156  again:
1157         /*
1158          * We allow for interrupts to reenter here and do a trace.
1159          * If one does, it will cause this original code to loop
1160          * back here. Even with heavy interrupts happening, this
1161          * should only happen a few times in a row. If this happens
1162          * 1000 times in a row, there must be either an interrupt
1163          * storm or we have something buggy.
1164          * Bail!
1165          */
1166         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1167                 return NULL;
1168
1169         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1170
1171         /*
1172          * Only the first commit can update the timestamp.
1173          * Yes there is a race here. If an interrupt comes in
1174          * just after the conditional and it traces too, then it
1175          * will also check the deltas. More than one timestamp may
1176          * also be made. But only the entry that did the actual
1177          * commit will be something other than zero.
1178          */
1179         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1180             rb_page_write(cpu_buffer->tail_page) ==
1181             rb_commit_index(cpu_buffer)) {
1182
1183                 delta = ts - cpu_buffer->write_stamp;
1184
1185                 /* make sure this delta is calculated here */
1186                 barrier();
1187
1188                 /* Did the write stamp get updated already? */
1189                 if (unlikely(ts < cpu_buffer->write_stamp))
1190                         delta = 0;
1191
1192                 if (test_time_stamp(delta)) {
1193
1194                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1195
1196                         if (commit == -EBUSY)
1197                                 return NULL;
1198
1199                         if (commit == -EAGAIN)
1200                                 goto again;
1201
1202                         RB_WARN_ON(cpu_buffer, commit < 0);
1203                 }
1204         } else
1205                 /* Non commits have zero deltas */
1206                 delta = 0;
1207
1208         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1209         if (PTR_ERR(event) == -EAGAIN)
1210                 goto again;
1211
1212         if (!event) {
1213                 if (unlikely(commit))
1214                         /*
1215                          * Ouch! We needed a timestamp and it was committed. But
1216                          * we didn't get our event reserved.
1217                          */
1218                         rb_set_commit_to_write(cpu_buffer);
1219                 return NULL;
1220         }
1221
1222         /*
1223          * If the timestamp was committed, make the commit our entry
1224          * now so that we will update it when needed.
1225          */
1226         if (commit)
1227                 rb_set_commit_event(cpu_buffer, event);
1228         else if (!rb_is_commit(cpu_buffer, event))
1229                 delta = 0;
1230
1231         event->time_delta = delta;
1232
1233         return event;
1234 }
1235
1236 static DEFINE_PER_CPU(int, rb_need_resched);
1237
1238 /**
1239  * ring_buffer_lock_reserve - reserve a part of the buffer
1240  * @buffer: the ring buffer to reserve from
1241  * @length: the length of the data to reserve (excluding event header)
1242  * @flags: a pointer to save the interrupt flags
1243  *
1244  * Returns a reserved event on the ring buffer to copy directly to.
1245  * The user of this interface will need to get the body to write into
1246  * and can use the ring_buffer_event_data() interface.
1247  *
1248  * The length is the length of the data needed, not the event length
1249  * which also includes the event header.
1250  *
1251  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1252  * If NULL is returned, then nothing has been allocated or locked.
1253  */
1254 struct ring_buffer_event *
1255 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1256                          unsigned long length,
1257                          unsigned long *flags)
1258 {
1259         struct ring_buffer_per_cpu *cpu_buffer;
1260         struct ring_buffer_event *event;
1261         int cpu, resched;
1262
1263         if (ring_buffer_flags != RB_BUFFERS_ON)
1264                 return NULL;
1265
1266         if (atomic_read(&buffer->record_disabled))
1267                 return NULL;
1268
1269         /* If we are tracing schedule, we don't want to recurse */
1270         resched = ftrace_preempt_disable();
1271
1272         cpu = raw_smp_processor_id();
1273
1274         if (!cpu_isset(cpu, buffer->cpumask))
1275                 goto out;
1276
1277         cpu_buffer = buffer->buffers[cpu];
1278
1279         if (atomic_read(&cpu_buffer->record_disabled))
1280                 goto out;
1281
1282         length = rb_calculate_event_length(length);
1283         if (length > BUF_PAGE_SIZE)
1284                 goto out;
1285
1286         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1287         if (!event)
1288                 goto out;
1289
1290         /*
1291          * Need to store resched state on this cpu.
1292          * Only the first needs to.
1293          */
1294
1295         if (preempt_count() == 1)
1296                 per_cpu(rb_need_resched, cpu) = resched;
1297
1298         return event;
1299
1300  out:
1301         ftrace_preempt_enable(resched);
1302         return NULL;
1303 }
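
/*
 * Example usage (sketch; 'struct my_rec' is a hypothetical payload
 * type defined by the caller):
 *
 *        struct ring_buffer_event *event;
 *        struct my_rec *rec;
 *        unsigned long flags;
 *
 *        event = ring_buffer_lock_reserve(buffer, sizeof(*rec), &flags);
 *        if (event) {
 *                rec = ring_buffer_event_data(event);
 *                rec->val = 42;
 *                ring_buffer_unlock_commit(buffer, event, flags);
 *        }
 */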
1304
1305 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1306                       struct ring_buffer_event *event)
1307 {
1308         cpu_buffer->entries++;
1309
1310         /* Only process further if we own the commit */
1311         if (!rb_is_commit(cpu_buffer, event))
1312                 return;
1313
1314         cpu_buffer->write_stamp += event->time_delta;
1315
1316         rb_set_commit_to_write(cpu_buffer);
1317 }
1318
1319 /**
1320  * ring_buffer_unlock_commit - commit a reserved event
1321  * @buffer: The buffer to commit to
1322  * @event: The event pointer to commit.
1323  * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1324  *
1325  * This commits the data to the ring buffer, and releases any locks held.
1326  *
1327  * Must be paired with ring_buffer_lock_reserve.
1328  */
1329 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1330                               struct ring_buffer_event *event,
1331                               unsigned long flags)
1332 {
1333         struct ring_buffer_per_cpu *cpu_buffer;
1334         int cpu = raw_smp_processor_id();
1335
1336         cpu_buffer = buffer->buffers[cpu];
1337
1338         rb_commit(cpu_buffer, event);
1339
1340         /*
1341          * Only the last preempt count needs to restore preemption.
1342          */
1343         if (preempt_count() == 1)
1344                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1345         else
1346                 preempt_enable_no_resched_notrace();
1347
1348         return 0;
1349 }
1350
1351 /**
1352  * ring_buffer_write - write data to the buffer without reserving
1353  * @buffer: The ring buffer to write to.
1354  * @length: The length of the data being written (excluding the event header)
1355  * @data: The data to write to the buffer.
1356  *
1357  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1358  * one function. If you already have the data to write to the buffer, it
1359  * may be easier to simply call this function.
1360  *
1361  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1362  * and not the length of the event which would hold the header.
1363  */
1364 int ring_buffer_write(struct ring_buffer *buffer,
1365                         unsigned long length,
1366                         void *data)
1367 {
1368         struct ring_buffer_per_cpu *cpu_buffer;
1369         struct ring_buffer_event *event;
1370         unsigned long event_length;
1371         void *body;
1372         int ret = -EBUSY;
1373         int cpu, resched;
1374
1375         if (ring_buffer_flags != RB_BUFFERS_ON)
1376                 return -EBUSY;
1377
1378         if (atomic_read(&buffer->record_disabled))
1379                 return -EBUSY;
1380
1381         resched = ftrace_preempt_disable();
1382
1383         cpu = raw_smp_processor_id();
1384
1385         if (!cpu_isset(cpu, buffer->cpumask))
1386                 goto out;
1387
1388         cpu_buffer = buffer->buffers[cpu];
1389
1390         if (atomic_read(&cpu_buffer->record_disabled))
1391                 goto out;
1392
1393         event_length = rb_calculate_event_length(length);
1394         event = rb_reserve_next_event(cpu_buffer,
1395                                       RINGBUF_TYPE_DATA, event_length);
1396         if (!event)
1397                 goto out;
1398
1399         body = rb_event_data(event);
1400
1401         memcpy(body, data, length);
1402
1403         rb_commit(cpu_buffer, event);
1404
1405         ret = 0;
1406  out:
1407         ftrace_preempt_enable(resched);
1408
1409         return ret;
1410 }
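
/*
 * Example (sketch): the reserve/commit pair collapses to a single
 * call when the data already exists:
 *
 *        char msg[] = "hello";
 *
 *        if (ring_buffer_write(buffer, sizeof(msg), msg))
 *                ;       /* -EBUSY: recording disabled or no space */
 */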
1411
1412 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1413 {
1414         struct buffer_page *reader = cpu_buffer->reader_page;
1415         struct buffer_page *head = cpu_buffer->head_page;
1416         struct buffer_page *commit = cpu_buffer->commit_page;
1417
1418         return reader->read == rb_page_commit(reader) &&
1419                 (commit == reader ||
1420                  (commit == head &&
1421                   head->read == rb_page_commit(commit)));
1422 }
1423
1424 /**
1425  * ring_buffer_record_disable - stop all writes into the buffer
1426  * @buffer: The ring buffer to stop writes to.
1427  *
1428  * This prevents all writes to the buffer. Any attempt to write
1429  * to the buffer after this will fail and return NULL.
1430  *
1431  * The caller should call synchronize_sched() after this.
1432  */
1433 void ring_buffer_record_disable(struct ring_buffer *buffer)
1434 {
1435         atomic_inc(&buffer->record_disabled);
1436 }
1437
1438 /**
1439  * ring_buffer_record_enable - enable writes to the buffer
1440  * @buffer: The ring buffer to enable writes
1441  *
1442  * Note, multiple disables will need the same number of enables
1443  * to truly enable the writing (much like preempt_disable).
1444  */
1445 void ring_buffer_record_enable(struct ring_buffer *buffer)
1446 {
1447         atomic_dec(&buffer->record_disabled);
1448 }
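
/*
 * Example (sketch): the typical pattern for safely inspecting the
 * buffer from outside the tracer:
 *
 *        ring_buffer_record_disable(buffer);
 *        synchronize_sched();    /* wait out writers already in flight */
 *        ...                     /* examine the buffer */
 *        ring_buffer_record_enable(buffer);
 */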
1449
1450 /**
1451  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1452  * @buffer: The ring buffer to stop writes to.
1453  * @cpu: The CPU buffer to stop
1454  *
1455  * This prevents all writes to the buffer. Any attempt to write
1456  * to the buffer after this will fail and return NULL.
1457  *
1458  * The caller should call synchronize_sched() after this.
1459  */
1460 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1461 {
1462         struct ring_buffer_per_cpu *cpu_buffer;
1463
1464         if (!cpu_isset(cpu, buffer->cpumask))
1465                 return;
1466
1467         cpu_buffer = buffer->buffers[cpu];
1468         atomic_inc(&cpu_buffer->record_disabled);
1469 }
1470
1471 /**
1472  * ring_buffer_record_enable_cpu - enable writes to the buffer
1473  * @buffer: The ring buffer to enable writes
1474  * @cpu: The CPU to enable.
1475  *
1476  * Note, multiple disables will need the same number of enables
1477  * to truly enable the writing (much like preempt_disable).
1478  */
1479 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1480 {
1481         struct ring_buffer_per_cpu *cpu_buffer;
1482
1483         if (!cpu_isset(cpu, buffer->cpumask))
1484                 return;
1485
1486         cpu_buffer = buffer->buffers[cpu];
1487         atomic_dec(&cpu_buffer->record_disabled);
1488 }
1489
1490 /**
1491  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1492  * @buffer: The ring buffer
1493  * @cpu: The per CPU buffer to get the entries from.
1494  */
1495 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1496 {
1497         struct ring_buffer_per_cpu *cpu_buffer;
1498
1499         if (!cpu_isset(cpu, buffer->cpumask))
1500                 return 0;
1501
1502         cpu_buffer = buffer->buffers[cpu];
1503         return cpu_buffer->entries;
1504 }
1505
1506 /**
1507  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1508  * @buffer: The ring buffer
1509  * @cpu: The per CPU buffer to get the number of overruns from
1510  */
1511 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1512 {
1513         struct ring_buffer_per_cpu *cpu_buffer;
1514
1515         if (!cpu_isset(cpu, buffer->cpumask))
1516                 return 0;
1517
1518         cpu_buffer = buffer->buffers[cpu];
1519         return cpu_buffer->overrun;
1520 }
1521
1522 /**
1523  * ring_buffer_entries - get the number of entries in a buffer
1524  * @buffer: The ring buffer
1525  *
1526  * Returns the total number of entries in the ring buffer
1527  * (all CPU entries)
1528  */
1529 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1530 {
1531         struct ring_buffer_per_cpu *cpu_buffer;
1532         unsigned long entries = 0;
1533         int cpu;
1534
1535         /* if you care about this being correct, lock the buffer */
1536         for_each_buffer_cpu(buffer, cpu) {
1537                 cpu_buffer = buffer->buffers[cpu];
1538                 entries += cpu_buffer->entries;
1539         }
1540
1541         return entries;
1542 }
1543
1544 /**
1545  * ring_buffer_overruns - get the number of overruns in the buffer
1546  * @buffer: The ring buffer
1547  *
1548  * Returns the total number of overruns in the ring buffer
1549  * (all CPU entries)
1550  */
1551 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1552 {
1553         struct ring_buffer_per_cpu *cpu_buffer;
1554         unsigned long overruns = 0;
1555         int cpu;
1556
1557         /* if you care about this being correct, lock the buffer */
1558         for_each_buffer_cpu(buffer, cpu) {
1559                 cpu_buffer = buffer->buffers[cpu];
1560                 overruns += cpu_buffer->overrun;
1561         }
1562
1563         return overruns;
1564 }
1565
1566 static void rb_iter_reset(struct ring_buffer_iter *iter)
1567 {
1568         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1569
1570         /* Iterator usage is expected to have record disabled */
1571         if (list_empty(&cpu_buffer->reader_page->list)) {
1572                 iter->head_page = cpu_buffer->head_page;
1573                 iter->head = cpu_buffer->head_page->read;
1574         } else {
1575                 iter->head_page = cpu_buffer->reader_page;
1576                 iter->head = cpu_buffer->reader_page->read;
1577         }
1578         if (iter->head)
1579                 iter->read_stamp = cpu_buffer->read_stamp;
1580         else
1581                 iter->read_stamp = iter->head_page->page->time_stamp;
1582 }
1583
1584 /**
1585  * ring_buffer_iter_reset - reset an iterator
1586  * @iter: The iterator to reset
1587  *
1588  * Resets the iterator, so that it will start from the beginning
1589  * again.
1590  */
1591 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1592 {
1593         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1594         unsigned long flags;
1595
1596         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1597         rb_iter_reset(iter);
1598         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1599 }
1600
1601 /**
1602  * ring_buffer_iter_empty - check if an iterator has no more to read
1603  * @iter: The iterator to check
1604  */
1605 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1606 {
1607         struct ring_buffer_per_cpu *cpu_buffer;
1608
1609         cpu_buffer = iter->cpu_buffer;
1610
1611         return iter->head_page == cpu_buffer->commit_page &&
1612                 iter->head == rb_commit_index(cpu_buffer);
1613 }
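
/*
 * Example (sketch): a reader created with ring_buffer_read_start()
 * (defined later in this file) typically drains events until the
 * iterator is empty:
 *
 *        while (!ring_buffer_iter_empty(iter))
 *                event = ring_buffer_read(iter, &ts);
 */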
1614
1615 static void
1616 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1617                      struct ring_buffer_event *event)
1618 {
1619         u64 delta;
1620
1621         switch (event->type) {
1622         case RINGBUF_TYPE_PADDING:
1623                 return;
1624
1625         case RINGBUF_TYPE_TIME_EXTEND:
1626                 delta = event->array[0];
1627                 delta <<= TS_SHIFT;
1628                 delta += event->time_delta;
1629                 cpu_buffer->read_stamp += delta;
1630                 return;
1631
1632         case RINGBUF_TYPE_TIME_STAMP:
1633                 /* FIXME: not implemented */
1634                 return;
1635
1636         case RINGBUF_TYPE_DATA:
1637                 cpu_buffer->read_stamp += event->time_delta;
1638                 return;
1639
1640         default:
1641                 BUG();
1642         }
1643         return;
1644 }
1645
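/*
 * Note on the TIME_EXTEND handling above: a delta too large for the
 * event's 27-bit time_delta field is split, with the high bits stored
 * in array[0]. The read side reconstructs it as:
 *
 *	delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 */
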
static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader page, then we must set the
	 * head to the inserted page; otherwise the head moves on to
	 * the page after the one we just inserted.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

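/*
 * Sketch of the splice performed in rb_get_reader_page (R is the empty
 * reader page, H the old head that is handed to the reader):
 *
 *	before:  ... <-> A <-> H <-> B <-> ...        R (off list)
 *	after:   ... <-> A <-> R <-> B <-> ...        H (new reader page)
 */
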
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		if (RB_WARN_ON(buffer,
			       iter->head_page == cpu_buffer->commit_page))
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the head if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		RB_WARN_ON(cpu_buffer, 1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_buffer_peek(buffer, cpu, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}

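/*
 * Example (sketch only; "show_next_ts" is a made-up helper): peeking
 * lets a reader inspect the next event without consuming it, e.g. to
 * decide which CPU buffer to drain first when merging output:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event)
 *		show_next_ts(cpu, ts);
 */
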
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The cpu buffer to consume from
 * @ts: The timestamp counter of this event.
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * This means that sequential reads will keep returning different events,
 * and will eventually empty the ring buffer if the producer is slower
 * than the reader.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (!event)
		goto out;

	rb_advance_reader(cpu_buffer);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}

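/*
 * Example (sketch; "process_event" is a hypothetical callback): a
 * consuming reader typically drains one CPU buffer in a loop until
 * NULL is returned:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 */
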
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}

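/*
 * Example (sketch; "dump_event" is a made-up helper): a non-consuming
 * pass over one CPU buffer pairs ring_buffer_read_start with
 * ring_buffer_read_finish, using ring_buffer_read (below) to walk:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		dump_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */
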
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}

/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}

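/*
 * Example (sketch; "snap" is a hypothetical spare buffer the tracer
 * allocated with the same number of pages, and "process_event" a
 * made-up callback): after a successful swap, live tracing continues
 * into "buffer" while the captured events are read from "snap":
 *
 *	if (ring_buffer_swap_cpu(buffer, snap, cpu) == 0) {
 *		struct ring_buffer_event *event;
 *		u64 ts;
 *
 *		while ((event = ring_buffer_consume(snap, cpu, &ts)))
 *			process_event(ring_buffer_event_data(event), ts);
 *	}
 */
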
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
			      struct buffer_data_page *bpage)
{
	struct ring_buffer_event *event;
	unsigned long head;

	__raw_spin_lock(&cpu_buffer->lock);
	for (head = 0; head < local_read(&bpage->commit);
	     head += rb_event_length(event)) {

		event = __rb_data_page_index(bpage, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			break;	/* must not return with the lock held */
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->entries--;
	}
	__raw_spin_unlock(&cpu_buffer->lock);
}

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	unsigned long addr;
	struct buffer_data_page *bpage;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	return bpage;
}

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
 *	if (ret)
 *		process_page(rpage);
 *
 * When @full is set, the function will not return 1 unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  1 if data has been transferred
 *  0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			    void **data_page, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	unsigned long flags;
	int ret = 0;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	if (!data_page)
		return 0;

	bpage = *data_page;
	if (!bpage)
		return 0;

	cpu_buffer = buffer->buffers[cpu];

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	/*
	 * rb_buffer_peek will get the next ring buffer if
	 * the current reader page is empty.
	 */
	event = rb_buffer_peek(buffer, cpu, NULL);
	if (!event)
		goto out;

	/* check for data */
	if (!local_read(&cpu_buffer->reader_page->page->commit))
		goto out;
	/*
	 * If the writer is already off of the reader page, then simply
	 * switch the reader page with the given page. Otherwise
	 * we need to copy the data out of the reader page.
	 */
	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int read = cpu_buffer->reader_page->read;
		unsigned int commit = local_read(&rpage->commit);

		if (full)
			goto out;

		/* The writer is still on the reader page, we must copy */
		memcpy(bpage->data, rpage->data + read, commit - read);

		/* make the given page describe what was copied */
		bpage->time_stamp = rpage->time_stamp;
		local_set(&bpage->commit, commit - read);

		/* consume what was copied */
		cpu_buffer->reader_page->read = commit;
	} else {
		/* swap the pages */
		rb_init_page(bpage);
		bpage = cpu_buffer->reader_page->page;
		cpu_buffer->reader_page->page = *data_page;
		cpu_buffer->reader_page->read = 0;
		*data_page = bpage;
	}
	ret = 1;

	/* update the entry counter */
	rb_remove_entries(cpu_buffer, bpage);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ret;
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffer_flags, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");

	return 0;
}

fs_initcall(rb_init_debugfs);
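
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug): the
 * "tracing_on" file created above toggles the global ring buffer
 * switch from user space:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# stop recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 */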