4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
21 /* Up this if you want to test the TIME_EXTENTS and normalization */
25 u64 ring_buffer_time_stamp(int cpu)
27 /* shift to debug/test normalization and TIME_EXTENTS */
28 return sched_clock() << DEBUG_SHIFT;
31 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
33 /* Just a quick test of the normalize function and deltas */
37 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
38 #define RB_ALIGNMENT_SHIFT 2
39 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
40 #define RB_MAX_SMALL_DATA 28
43 RB_LEN_TIME_EXTEND = 8,
44 RB_LEN_TIME_STAMP = 16,
47 /* inline for ring buffer fast paths */
48 static inline unsigned
49 rb_event_length(struct ring_buffer_event *event)
53 switch (event->type) {
54 case RINGBUF_TYPE_PADDING:
58 case RINGBUF_TYPE_TIME_EXTEND:
59 return RB_LEN_TIME_EXTEND;
61 case RINGBUF_TYPE_TIME_STAMP:
62 return RB_LEN_TIME_STAMP;
64 case RINGBUF_TYPE_DATA:
65 if (event->len)
66 length = event->len << RB_ALIGNMENT_SHIFT;
67 else
68 length = event->array[0];
69 return length + RB_EVNT_HDR_SIZE;
78 * ring_buffer_event_length - return the length of the event
79 * @event: the event to get the length of
81 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
83 return rb_event_length(event);
86 /* inline for ring buffer fast paths */
88 rb_event_data(struct ring_buffer_event *event)
90 BUG_ON(event->type != RINGBUF_TYPE_DATA);
91 /* If length is in len field, then array[0] has the data */
92 if (event->len)
93 return (void *)&event->array[0];
94 /* Otherwise length is in array[0] and array[1] has the data */
95 return (void *)&event->array[1];
99 * ring_buffer_event_data - return the data of the event
100 * @event: the event to get the data from
102 void *ring_buffer_event_data(struct ring_buffer_event *event)
104 return rb_event_data(event);
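/*
 * Illustrative reader-side sketch (not from the original file) of the two
 * accessors above, applied to an event obtained from ring_buffer_consume()
 * or ring_buffer_peek(); note that in this version the reported length
 * still includes the event header:
 *
 *	void *body = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 */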
107 #define for_each_buffer_cpu(buffer, cpu) \
108 for_each_cpu_mask(cpu, buffer->cpumask)
111 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
112 #define TS_DELTA_TEST (~TS_MASK)
115 * This hack stolen from mm/slob.c.
116 * We can store per page timing information in the page frame of the page.
117 * Thanks to Peter Zijlstra for suggesting this idea.
120 u64 time_stamp; /* page time stamp */
121 local_t write; /* index for next write */
122 local_t commit; /* write committed index */
123 unsigned read; /* index for next read */
124 struct list_head list; /* list of free pages */
125 void *page; /* Actual data page */
129 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
132 static inline void free_buffer_page(struct buffer_page *bpage)
135 free_page((unsigned long)bpage->page);
140 * We need to fit the time_stamp delta into 27 bits.
142 static inline int test_time_stamp(u64 delta)
144 if (delta & TS_DELTA_TEST)
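/*
 * Worked example (assuming the nanosecond-scale clock that sched_clock()
 * above approximates): 27 bits of delta cover 2^27 ns, roughly 134 ms.
 * Two events on the same CPU spaced further apart than that cannot encode
 * the gap in event->time_delta alone, so a RINGBUF_TYPE_TIME_EXTEND event
 * is emitted first.
 */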
149 #define BUF_PAGE_SIZE PAGE_SIZE
152 * head_page == tail_page && head == tail then buffer is empty.
154 struct ring_buffer_per_cpu {
156 struct ring_buffer *buffer;
158 struct lock_class_key lock_key;
159 struct list_head pages;
160 struct buffer_page *head_page; /* read from head */
161 struct buffer_page *tail_page; /* write to tail */
162 struct buffer_page *commit_page; /* committed pages */
163 struct buffer_page *reader_page;
164 unsigned long overrun;
165 unsigned long entries;
168 atomic_t record_disabled;
177 atomic_t record_disabled;
181 struct ring_buffer_per_cpu **buffers;
184 struct ring_buffer_iter {
185 struct ring_buffer_per_cpu *cpu_buffer;
187 struct buffer_page *head_page;
191 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
192 #define RB_WARN_ON(buffer, cond) \
194 if (unlikely(cond)) { \
195 atomic_inc(&buffer->record_disabled); \
200 #define RB_WARN_ON_RET(buffer, cond) \
202 if (unlikely(cond)) { \
203 atomic_inc(&buffer->record_disabled); \
209 #define RB_WARN_ON_RET_INT(buffer, cond) \
211 if (unlikely(cond)) { \
212 atomic_inc(&buffer->record_disabled); \
218 #define RB_WARN_ON_RET_NULL(buffer, cond) \
220 if (unlikely(cond)) { \
221 atomic_inc(&buffer->record_disabled); \
227 #define RB_WARN_ON_ONCE(buffer, cond) \
230 if (unlikely(cond) && !once) { \
232 atomic_inc(&buffer->record_disabled); \
237 /* buffer must be ring_buffer not per_cpu */
238 #define RB_WARN_ON_UNLOCK(buffer, cond) \
240 if (unlikely(cond)) { \
241 mutex_unlock(&buffer->mutex); \
242 atomic_inc(&buffer->record_disabled); \
249 * check_pages - integrity check of buffer pages
250 * @cpu_buffer: CPU buffer with pages to test
252 * As a safety measure we check to make sure the data pages have not
253 * been corrupted.
255 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
257 struct list_head *head = &cpu_buffer->pages;
258 struct buffer_page *page, *tmp;
260 RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
261 RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
263 list_for_each_entry_safe(page, tmp, head, list) {
264 RB_WARN_ON_RET_INT(cpu_buffer,
265 page->list.next->prev != &page->list);
266 RB_WARN_ON_RET_INT(cpu_buffer,
267 page->list.prev->next != &page->list);
273 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
276 struct list_head *head = &cpu_buffer->pages;
277 struct buffer_page *page, *tmp;
282 for (i = 0; i < nr_pages; i++) {
283 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
284 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
287 list_add(&page->list, &pages);
289 addr = __get_free_page(GFP_KERNEL);
292 page->page = (void *)addr;
295 list_splice(&pages, head);
297 rb_check_pages(cpu_buffer);
302 list_for_each_entry_safe(page, tmp, &pages, list) {
303 list_del_init(&page->list);
304 free_buffer_page(page);
309 static struct ring_buffer_per_cpu *
310 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
312 struct ring_buffer_per_cpu *cpu_buffer;
313 struct buffer_page *page;
317 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
318 GFP_KERNEL, cpu_to_node(cpu));
322 cpu_buffer->cpu = cpu;
323 cpu_buffer->buffer = buffer;
324 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
325 INIT_LIST_HEAD(&cpu_buffer->pages);
327 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
328 GFP_KERNEL, cpu_to_node(cpu));
330 goto fail_free_buffer;
332 cpu_buffer->reader_page = page;
333 addr = __get_free_page(GFP_KERNEL);
335 goto fail_free_reader;
336 page->page = (void *)addr;
338 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
340 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
342 goto fail_free_reader;
344 cpu_buffer->head_page
345 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
346 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
351 free_buffer_page(cpu_buffer->reader_page);
358 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
360 struct list_head *head = &cpu_buffer->pages;
361 struct buffer_page *page, *tmp;
363 list_del_init(&cpu_buffer->reader_page->list);
364 free_buffer_page(cpu_buffer->reader_page);
366 list_for_each_entry_safe(page, tmp, head, list) {
367 list_del_init(&page->list);
368 free_buffer_page(page);
374 * Causes compile errors if the struct buffer_page gets bigger
375 * than the struct page.
377 extern int ring_buffer_page_too_big(void);
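/*
 * Side note (an observation about the idiom, not from the original file):
 * pairing this undefined extern with the compile-time constant check in
 * ring_buffer_alloc() below behaves roughly like
 * BUILD_BUG_ON(sizeof(struct buffer_page) > sizeof(struct page)); when the
 * condition is false the call is optimized away and the missing symbol is
 * never referenced at link time.
 */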
380 * ring_buffer_alloc - allocate a new ring_buffer
381 * @size: the size in bytes that is needed.
382 * @flags: attributes to set for the ring buffer.
384 * Currently the only flag that is available is the RB_FL_OVERWRITE
385 * flag. This flag means that the buffer will overwrite old data
386 * when the buffer wraps. If this flag is not set, the buffer will
387 * drop data when the tail hits the head.
389 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
391 struct ring_buffer *buffer;
395 /* Paranoid! Optimizes out when all is well */
396 if (sizeof(struct buffer_page) > sizeof(struct page))
397 ring_buffer_page_too_big();
400 /* keep it in its own cache line */
401 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
406 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
407 buffer->flags = flags;
409 /* need at least two pages */
410 if (buffer->pages == 1)
411 buffer->pages++;
413 buffer->cpumask = cpu_possible_map;
414 buffer->cpus = nr_cpu_ids;
416 bsize = sizeof(void *) * nr_cpu_ids;
417 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
419 if (!buffer->buffers)
420 goto fail_free_buffer;
422 for_each_buffer_cpu(buffer, cpu) {
423 buffer->buffers[cpu] =
424 rb_allocate_cpu_buffer(buffer, cpu);
425 if (!buffer->buffers[cpu])
426 goto fail_free_buffers;
429 mutex_init(&buffer->mutex);
434 for_each_buffer_cpu(buffer, cpu) {
435 if (buffer->buffers[cpu])
436 rb_free_cpu_buffer(buffer->buffers[cpu]);
438 kfree(buffer->buffers);
446 * ring_buffer_free - free a ring buffer.
447 * @buffer: the buffer to free.
450 ring_buffer_free(struct ring_buffer *buffer)
454 for_each_buffer_cpu(buffer, cpu)
455 rb_free_cpu_buffer(buffer->buffers[cpu]);
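/*
 * Minimal allocation/teardown sketch (illustrative only; the 1 MB size and
 * the overwrite policy are arbitrary example values):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */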
460 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
463 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
465 struct buffer_page *page;
469 atomic_inc(&cpu_buffer->record_disabled);
472 for (i = 0; i < nr_pages; i++) {
473 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
474 p = cpu_buffer->pages.next;
475 page = list_entry(p, struct buffer_page, list);
476 list_del_init(&page->list);
477 free_buffer_page(page);
479 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
481 rb_reset_cpu(cpu_buffer);
483 rb_check_pages(cpu_buffer);
485 atomic_dec(&cpu_buffer->record_disabled);
490 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
491 struct list_head *pages, unsigned nr_pages)
493 struct buffer_page *page;
497 atomic_inc(&cpu_buffer->record_disabled);
500 for (i = 0; i < nr_pages; i++) {
501 RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
502 p = pages->next;
503 page = list_entry(p, struct buffer_page, list);
504 list_del_init(&page->list);
505 list_add_tail(&page->list, &cpu_buffer->pages);
507 rb_reset_cpu(cpu_buffer);
509 rb_check_pages(cpu_buffer);
511 atomic_dec(&cpu_buffer->record_disabled);
515 * ring_buffer_resize - resize the ring buffer
516 * @buffer: the buffer to resize.
517 * @size: the new size.
519 * The tracer is responsible for making sure that the buffer is
520 * not being used while changing the size.
521 * Note: We may be able to change the above requirement by using
522 * RCU synchronizations.
524 * Minimum size is 2 * BUF_PAGE_SIZE.
526 * Returns -1 on failure.
528 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
530 struct ring_buffer_per_cpu *cpu_buffer;
531 unsigned nr_pages, rm_pages, new_pages;
532 struct buffer_page *page, *tmp;
533 unsigned long buffer_size;
538 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
539 size *= BUF_PAGE_SIZE;
540 buffer_size = buffer->pages * BUF_PAGE_SIZE;
542 /* we need a minimum of two pages */
543 if (size < BUF_PAGE_SIZE * 2)
544 size = BUF_PAGE_SIZE * 2;
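/*
 * Rounding example (assuming 4096-byte pages): a request of 10000 bytes
 * becomes DIV_ROUND_UP(10000, 4096) = 3 pages, i.e. 12288 bytes, and any
 * request below 8192 bytes is bumped up to the two-page minimum.
 */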
546 if (size == buffer_size)
549 mutex_lock(&buffer->mutex);
551 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
553 if (size < buffer_size) {
555 /* easy case, just free pages */
556 RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
558 rm_pages = buffer->pages - nr_pages;
560 for_each_buffer_cpu(buffer, cpu) {
561 cpu_buffer = buffer->buffers[cpu];
562 rb_remove_pages(cpu_buffer, rm_pages);
568 * This is a bit more difficult. We only want to add pages
569 * when we can allocate enough for all CPUs. We do this
570 * by allocating all the pages and storing them on a local
571 linked list. If we succeed in our allocation, then we
572 * add these pages to the cpu_buffers. Otherwise we just free
573 * them all and return -ENOMEM;
575 RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
577 new_pages = nr_pages - buffer->pages;
579 for_each_buffer_cpu(buffer, cpu) {
580 for (i = 0; i < new_pages; i++) {
581 page = kzalloc_node(ALIGN(sizeof(*page),
583 GFP_KERNEL, cpu_to_node(cpu));
586 list_add(&page->list, &pages);
587 addr = __get_free_page(GFP_KERNEL);
590 page->page = (void *)addr;
594 for_each_buffer_cpu(buffer, cpu) {
595 cpu_buffer = buffer->buffers[cpu];
596 rb_insert_pages(cpu_buffer, &pages, new_pages);
599 RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
602 buffer->pages = nr_pages;
603 mutex_unlock(&buffer->mutex);
608 list_for_each_entry_safe(page, tmp, &pages, list) {
609 list_del_init(&page->list);
610 free_buffer_page(page);
615 static inline int rb_null_event(struct ring_buffer_event *event)
617 return event->type == RINGBUF_TYPE_PADDING;
620 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
622 return page->page + index;
625 static inline struct ring_buffer_event *
626 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
628 return __rb_page_index(cpu_buffer->reader_page,
629 cpu_buffer->reader_page->read);
632 static inline struct ring_buffer_event *
633 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
635 return __rb_page_index(cpu_buffer->head_page,
636 cpu_buffer->head_page->read);
639 static inline struct ring_buffer_event *
640 rb_iter_head_event(struct ring_buffer_iter *iter)
642 return __rb_page_index(iter->head_page, iter->head);
645 static inline unsigned rb_page_write(struct buffer_page *bpage)
647 return local_read(&bpage->write);
650 static inline unsigned rb_page_commit(struct buffer_page *bpage)
652 return local_read(&bpage->commit);
655 /* Size is determined by what has been committed */
656 static inline unsigned rb_page_size(struct buffer_page *bpage)
658 return rb_page_commit(bpage);
661 static inline unsigned
662 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
664 return rb_page_commit(cpu_buffer->commit_page);
667 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
669 return rb_page_commit(cpu_buffer->head_page);
673 * When the tail hits the head and the buffer is in overwrite mode,
674 * the head jumps to the next page and all content on the previous
675 * page is discarded. But before doing so, we update the overrun
676 * variable of the buffer.
678 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
680 struct ring_buffer_event *event;
683 for (head = 0; head < rb_head_size(cpu_buffer);
684 head += rb_event_length(event)) {
686 event = __rb_page_index(cpu_buffer->head_page, head);
687 RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
688 /* Only count data entries */
689 if (event->type != RINGBUF_TYPE_DATA)
691 cpu_buffer->overrun++;
692 cpu_buffer->entries--;
696 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
697 struct buffer_page **page)
699 struct list_head *p = (*page)->list.next;
701 if (p == &cpu_buffer->pages)
704 *page = list_entry(p, struct buffer_page, list);
707 static inline unsigned
708 rb_event_index(struct ring_buffer_event *event)
710 unsigned long addr = (unsigned long)event;
712 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
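/*
 * Since BUF_PAGE_SIZE is defined as PAGE_SIZE above, the
 * (PAGE_SIZE - BUF_PAGE_SIZE) term is currently zero and the expression
 * reduces to the event's byte offset within its page; the extra term only
 * matters if the data area ever becomes smaller than a full page.
 */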
716 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
717 struct ring_buffer_event *event)
719 unsigned long addr = (unsigned long)event;
722 index = rb_event_index(event);
723 addr &= PAGE_MASK;
725 return cpu_buffer->commit_page->page == (void *)addr &&
726 rb_commit_index(cpu_buffer) == index;
730 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
731 struct ring_buffer_event *event)
733 unsigned long addr = (unsigned long)event;
736 index = rb_event_index(event);
737 addr &= PAGE_MASK;
739 while (cpu_buffer->commit_page->page != (void *)addr) {
740 RB_WARN_ON(cpu_buffer,
741 cpu_buffer->commit_page == cpu_buffer->tail_page);
742 cpu_buffer->commit_page->commit =
743 cpu_buffer->commit_page->write;
744 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
745 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
748 /* Now set the commit to the event's index */
749 local_set(&cpu_buffer->commit_page->commit, index);
753 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
756 * We only race with interrupts and NMIs on this CPU.
757 * If we own the commit event, then we can commit
758 * all others that interrupted us, since the interruptions
759 * are in stack format (they finish before they come
760 * back to us). This allows us to do a simple loop to
761 * assign the commit to the tail.
763 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
764 cpu_buffer->commit_page->commit =
765 cpu_buffer->commit_page->write;
766 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
767 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
768 /* add barrier to keep gcc from optimizing too much */
771 while (rb_commit_index(cpu_buffer) !=
772 rb_page_write(cpu_buffer->commit_page)) {
773 cpu_buffer->commit_page->commit =
774 cpu_buffer->commit_page->write;
779 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
781 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
782 cpu_buffer->reader_page->read = 0;
785 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
787 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
790 * The iterator could be on the reader page (it starts there).
791 * But the head could have moved, since the reader was
792 * found. Check for this case and assign the iterator
793 * to the head page instead of next.
795 if (iter->head_page == cpu_buffer->reader_page)
796 iter->head_page = cpu_buffer->head_page;
798 rb_inc_page(cpu_buffer, &iter->head_page);
800 iter->read_stamp = iter->head_page->time_stamp;
805 * ring_buffer_update_event - update event type and data
806 * @event: the event to update
807 * @type: the type of event
808 * @length: the size of the event field in the ring buffer
810 * Update the type and data fields of the event. The length
811 * is the actual size that is written to the ring buffer,
812 * and with this, we can determine what to place into the
813 * data field.
816 rb_update_event(struct ring_buffer_event *event,
817 unsigned type, unsigned length)
823 case RINGBUF_TYPE_PADDING:
826 case RINGBUF_TYPE_TIME_EXTEND:
828 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
829 >> RB_ALIGNMENT_SHIFT;
832 case RINGBUF_TYPE_TIME_STAMP:
834 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
835 >> RB_ALIGNMENT_SHIFT;
838 case RINGBUF_TYPE_DATA:
839 length -= RB_EVNT_HDR_SIZE;
840 if (length > RB_MAX_SMALL_DATA) {
842 event->array[0] = length;
845 (length + (RB_ALIGNMENT-1))
846 >> RB_ALIGNMENT_SHIFT;
853 static inline unsigned rb_calculate_event_length(unsigned length)
855 struct ring_buffer_event event; /* Used only for sizeof array */
857 /* zero length can cause confusion */
861 if (length > RB_MAX_SMALL_DATA)
862 length += sizeof(event.array[0]);
864 length += RB_EVNT_HDR_SIZE;
865 length = ALIGN(length, RB_ALIGNMENT);
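/*
 * Worked example: a 10-byte payload uses the small-data encoding, so
 * 10 + RB_EVNT_HDR_SIZE (4 bytes here) = 14, aligned up to 16; a 100-byte
 * payload exceeds RB_MAX_SMALL_DATA (28), so a 4-byte array[0] length word
 * is added first: 100 + 4 + 4 = 108, already a multiple of RB_ALIGNMENT.
 */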
870 static struct ring_buffer_event *
871 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
872 unsigned type, unsigned long length, u64 *ts)
874 struct buffer_page *tail_page, *head_page, *reader_page;
875 unsigned long tail, write;
876 struct ring_buffer *buffer = cpu_buffer->buffer;
877 struct ring_buffer_event *event;
880 tail_page = cpu_buffer->tail_page;
881 write = local_add_return(length, &tail_page->write);
882 tail = write - length;
884 /* See if we shot past the end of this buffer page */
885 if (write > BUF_PAGE_SIZE) {
886 struct buffer_page *next_page = tail_page;
888 local_irq_save(flags);
889 __raw_spin_lock(&cpu_buffer->lock);
891 rb_inc_page(cpu_buffer, &next_page);
893 head_page = cpu_buffer->head_page;
894 reader_page = cpu_buffer->reader_page;
896 /* we grabbed the lock before incrementing */
897 RB_WARN_ON(cpu_buffer, next_page == reader_page);
900 * If for some reason, we had an interrupt storm that made
901 * it all the way around the buffer, bail, and warn
902 * about it.
904 if (unlikely(next_page == cpu_buffer->commit_page)) {
909 if (next_page == head_page) {
910 if (!(buffer->flags & RB_FL_OVERWRITE)) {
912 if (tail <= BUF_PAGE_SIZE)
913 local_set(&tail_page->write, tail);
917 /* tail_page has not moved yet? */
918 if (tail_page == cpu_buffer->tail_page) {
919 /* count overflows */
920 rb_update_overflow(cpu_buffer);
922 rb_inc_page(cpu_buffer, &head_page);
923 cpu_buffer->head_page = head_page;
924 cpu_buffer->head_page->read = 0;
929 * If the tail page is still the same as what we think
930 * it is, then it is up to us to update the tail
931 * pointer.
933 if (tail_page == cpu_buffer->tail_page) {
934 local_set(&next_page->write, 0);
935 local_set(&next_page->commit, 0);
936 cpu_buffer->tail_page = next_page;
938 /* reread the time stamp */
939 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
940 cpu_buffer->tail_page->time_stamp = *ts;
944 * The actual tail page has moved forward.
946 if (tail < BUF_PAGE_SIZE) {
947 /* Mark the rest of the page with padding */
948 event = __rb_page_index(tail_page, tail);
949 event->type = RINGBUF_TYPE_PADDING;
952 if (tail <= BUF_PAGE_SIZE)
953 /* Set the write back to the previous setting */
954 local_set(&tail_page->write, tail);
957 * If this was a commit entry that failed,
960 if (tail_page == cpu_buffer->commit_page &&
961 tail == rb_commit_index(cpu_buffer)) {
962 rb_set_commit_to_write(cpu_buffer);
965 __raw_spin_unlock(&cpu_buffer->lock);
966 local_irq_restore(flags);
968 /* fail and let the caller try again */
969 return ERR_PTR(-EAGAIN);
972 /* We reserved something on the buffer */
974 RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
976 event = __rb_page_index(tail_page, tail);
977 rb_update_event(event, type, length);
980 * If this is a commit and the tail is zero, then update
981 * this page's time stamp.
983 if (!tail && rb_is_commit(cpu_buffer, event))
984 cpu_buffer->commit_page->time_stamp = *ts;
989 __raw_spin_unlock(&cpu_buffer->lock);
990 local_irq_restore(flags);
995 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
998 struct ring_buffer_event *event;
1002 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1003 printk(KERN_WARNING "Delta way too big! %llu"
1004 " ts=%llu write stamp = %llu\n",
1005 (unsigned long long)*delta,
1006 (unsigned long long)*ts,
1007 (unsigned long long)cpu_buffer->write_stamp);
1012 * The delta is too big, we need to add a
1013 * new timestamp.
1015 event = __rb_reserve_next(cpu_buffer,
1016 RINGBUF_TYPE_TIME_EXTEND,
1022 if (PTR_ERR(event) == -EAGAIN)
1025 /* Only a committed time event can update the write stamp */
1026 if (rb_is_commit(cpu_buffer, event)) {
1028 * If this is the first on the page, then we need to
1029 * update the page itself, and just put in a zero.
1031 if (rb_event_index(event)) {
1032 event->time_delta = *delta & TS_MASK;
1033 event->array[0] = *delta >> TS_SHIFT;
1035 cpu_buffer->commit_page->time_stamp = *ts;
1036 event->time_delta = 0;
1037 event->array[0] = 0;
1039 cpu_buffer->write_stamp = *ts;
1040 /* let the caller know this was the commit */
1043 /* Darn, this is just wasted space */
1044 event->time_delta = 0;
1045 event->array[0] = 0;
1054 static struct ring_buffer_event *
1055 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1056 unsigned type, unsigned long length)
1058 struct ring_buffer_event *event;
1065 * We allow for interrupts to reenter here and do a trace.
1066 * If one does, it will cause this original code to loop
1067 * back here. Even with heavy interrupts happening, this
1068 * should only happen a few times in a row. If this happens
1069 * 1000 times in a row, there must be either an interrupt
1070 * storm or we have something buggy.
1073 if (unlikely(++nr_loops > 1000)) {
1074 RB_WARN_ON(cpu_buffer, 1);
1078 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1081 * Only the first commit can update the timestamp.
1082 * Yes there is a race here. If an interrupt comes in
1083 * just after the conditional and it traces too, then it
1084 * will also check the deltas. More than one timestamp may
1085 * also be made. But only the entry that did the actual
1086 * commit will be something other than zero.
1088 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1089 rb_page_write(cpu_buffer->tail_page) ==
1090 rb_commit_index(cpu_buffer)) {
1092 delta = ts - cpu_buffer->write_stamp;
1094 /* make sure this delta is calculated here */
1097 /* Did the write stamp get updated already? */
1098 if (unlikely(ts < cpu_buffer->write_stamp))
1101 if (test_time_stamp(delta)) {
1103 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1105 if (commit == -EBUSY)
1108 if (commit == -EAGAIN)
1111 RB_WARN_ON(cpu_buffer, commit < 0);
1114 /* Non-commits have zero deltas */
1117 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1118 if (PTR_ERR(event) == -EAGAIN)
1122 if (unlikely(commit))
1124 * Ouch! We needed a timestamp and it was committed. But
1125 * we didn't get our event reserved.
1127 rb_set_commit_to_write(cpu_buffer);
1132 * If the timestamp was committed, make the commit our entry
1133 * now so that we will update it when needed.
1136 rb_set_commit_event(cpu_buffer, event);
1137 else if (!rb_is_commit(cpu_buffer, event))
1140 event->time_delta = delta;
1145 static DEFINE_PER_CPU(int, rb_need_resched);
1148 * ring_buffer_lock_reserve - reserve a part of the buffer
1149 * @buffer: the ring buffer to reserve from
1150 * @length: the length of the data to reserve (excluding event header)
1151 * @flags: a pointer to save the interrupt flags
1153 * Returns a reserved event on the ring buffer to copy directly to.
1154 * The user of this interface will need to get the body to write into
1155 * and can use the ring_buffer_event_data() interface.
1157 * The length is the length of the data needed, not the event length
1158 * which also includes the event header.
1160 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1161 * If NULL is returned, then nothing has been allocated or locked.
1163 struct ring_buffer_event *
1164 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1165 unsigned long length,
1166 unsigned long *flags)
1168 struct ring_buffer_per_cpu *cpu_buffer;
1169 struct ring_buffer_event *event;
1172 if (atomic_read(&buffer->record_disabled))
1175 /* If we are tracing schedule, we don't want to recurse */
1176 resched = ftrace_preempt_disable();
1178 cpu = raw_smp_processor_id();
1180 if (!cpu_isset(cpu, buffer->cpumask))
1183 cpu_buffer = buffer->buffers[cpu];
1185 if (atomic_read(&cpu_buffer->record_disabled))
1188 length = rb_calculate_event_length(length);
1189 if (length > BUF_PAGE_SIZE)
1192 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1197 * Need to store resched state on this cpu.
1198 * Only the first needs to.
1201 if (preempt_count() == 1)
1202 per_cpu(rb_need_resched, cpu) = resched;
1207 ftrace_preempt_enable(resched);
1211 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1212 struct ring_buffer_event *event)
1214 cpu_buffer->entries++;
1216 /* Only process further if we own the commit */
1217 if (!rb_is_commit(cpu_buffer, event))
1220 cpu_buffer->write_stamp += event->time_delta;
1222 rb_set_commit_to_write(cpu_buffer);
1226 * ring_buffer_unlock_commit - commit a reserved event
1227 * @buffer: The buffer to commit to
1228 * @event: The event pointer to commit.
1229 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1231 * This commits the data to the ring buffer, and releases any locks held.
1233 * Must be paired with ring_buffer_lock_reserve.
1235 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1236 struct ring_buffer_event *event,
1237 unsigned long flags)
1239 struct ring_buffer_per_cpu *cpu_buffer;
1240 int cpu = raw_smp_processor_id();
1242 cpu_buffer = buffer->buffers[cpu];
1244 rb_commit(cpu_buffer, event);
1247 * Only the last preempt count needs to restore preemption.
1249 if (preempt_count() == 1)
1250 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1252 preempt_enable_no_resched_notrace();
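/*
 * Illustrative write-side sketch using the reserve/commit pair above (not
 * from the original file; "struct my_entry" and "ent" are hypothetical
 * caller-side names):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *ent;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*ent), &flags);
 *	if (!event)
 *		return;
 *	ent = ring_buffer_event_data(event);
 *	ent->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */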
1258 * ring_buffer_write - write data to the buffer without reserving
1259 * @buffer: The ring buffer to write to.
1260 * @length: The length of the data being written (excluding the event header)
1261 * @data: The data to write to the buffer.
1263 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1264 * one function. If you already have the data to write to the buffer, it
1265 * may be easier to simply call this function.
1267 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1268 * and not the length of the event which would hold the header.
1270 int ring_buffer_write(struct ring_buffer *buffer,
1271 unsigned long length,
1274 struct ring_buffer_per_cpu *cpu_buffer;
1275 struct ring_buffer_event *event;
1276 unsigned long event_length;
1281 if (atomic_read(&buffer->record_disabled))
1284 resched = ftrace_preempt_disable();
1286 cpu = raw_smp_processor_id();
1288 if (!cpu_isset(cpu, buffer->cpumask))
1291 cpu_buffer = buffer->buffers[cpu];
1293 if (atomic_read(&cpu_buffer->record_disabled))
1296 event_length = rb_calculate_event_length(length);
1297 event = rb_reserve_next_event(cpu_buffer,
1298 RINGBUF_TYPE_DATA, event_length);
1302 body = rb_event_data(event);
1304 memcpy(body, data, length);
1306 rb_commit(cpu_buffer, event);
1310 ftrace_preempt_enable(resched);
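/*
 * Illustrative one-shot write (not from the original file; "msg" is a
 * hypothetical payload and the zero-on-success return convention is an
 * assumption about this function):
 *
 *	char msg[] = "hello";
 *
 *	if (ring_buffer_write(buffer, sizeof(msg), msg) != 0)
 *		handle_drop();		(handle_drop() is hypothetical)
 */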
1315 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1317 struct buffer_page *reader = cpu_buffer->reader_page;
1318 struct buffer_page *head = cpu_buffer->head_page;
1319 struct buffer_page *commit = cpu_buffer->commit_page;
1321 return reader->read == rb_page_commit(reader) &&
1322 (commit == reader ||
1324 head->read == rb_page_commit(commit)));
1328 * ring_buffer_record_disable - stop all writes into the buffer
1329 * @buffer: The ring buffer to stop writes to.
1331 * This prevents all writes to the buffer. Any attempt to write
1332 * to the buffer after this will fail and return NULL.
1334 * The caller should call synchronize_sched() after this.
1336 void ring_buffer_record_disable(struct ring_buffer *buffer)
1338 atomic_inc(&buffer->record_disabled);
1342 * ring_buffer_record_enable - enable writes to the buffer
1343 * @buffer: The ring buffer to enable writes
1345 * Note, multiple disables will need the same number of enables
1346 * to truly enable the writing (much like preempt_disable).
1348 void ring_buffer_record_enable(struct ring_buffer *buffer)
1350 atomic_dec(&buffer->record_disabled);
1354 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1355 * @buffer: The ring buffer to stop writes to.
1356 * @cpu: The CPU buffer to stop
1358 * This prevents all writes to the buffer. Any attempt to write
1359 * to the buffer after this will fail and return NULL.
1361 * The caller should call synchronize_sched() after this.
1363 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1365 struct ring_buffer_per_cpu *cpu_buffer;
1367 if (!cpu_isset(cpu, buffer->cpumask))
1370 cpu_buffer = buffer->buffers[cpu];
1371 atomic_inc(&cpu_buffer->record_disabled);
1375 * ring_buffer_record_enable_cpu - enable writes to the buffer
1376 * @buffer: The ring buffer to enable writes
1377 * @cpu: The CPU to enable.
1379 * Note, multiple disables will need the same number of enables
1380 * to truly enable the writing (much like preempt_disable).
1382 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1384 struct ring_buffer_per_cpu *cpu_buffer;
1386 if (!cpu_isset(cpu, buffer->cpumask))
1389 cpu_buffer = buffer->buffers[cpu];
1390 atomic_dec(&cpu_buffer->record_disabled);
1394 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1395 * @buffer: The ring buffer
1396 * @cpu: The per CPU buffer to get the entries from.
1398 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1400 struct ring_buffer_per_cpu *cpu_buffer;
1402 if (!cpu_isset(cpu, buffer->cpumask))
1405 cpu_buffer = buffer->buffers[cpu];
1406 return cpu_buffer->entries;
1410 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1411 * @buffer: The ring buffer
1412 * @cpu: The per CPU buffer to get the number of overruns from
1414 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1416 struct ring_buffer_per_cpu *cpu_buffer;
1418 if (!cpu_isset(cpu, buffer->cpumask))
1421 cpu_buffer = buffer->buffers[cpu];
1422 return cpu_buffer->overrun;
1426 * ring_buffer_entries - get the number of entries in a buffer
1427 * @buffer: The ring buffer
1429 * Returns the total number of entries in the ring buffer
1432 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1434 struct ring_buffer_per_cpu *cpu_buffer;
1435 unsigned long entries = 0;
1438 /* if you care about this being correct, lock the buffer */
1439 for_each_buffer_cpu(buffer, cpu) {
1440 cpu_buffer = buffer->buffers[cpu];
1441 entries += cpu_buffer->entries;
1448 * ring_buffer_overruns - get the total number of overruns in the buffer
1449 * @buffer: The ring buffer
1451 * Returns the total number of overruns in the ring buffer
1454 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1456 struct ring_buffer_per_cpu *cpu_buffer;
1457 unsigned long overruns = 0;
1460 /* if you care about this being correct, lock the buffer */
1461 for_each_buffer_cpu(buffer, cpu) {
1462 cpu_buffer = buffer->buffers[cpu];
1463 overruns += cpu_buffer->overrun;
1470 * ring_buffer_iter_reset - reset an iterator
1471 * @iter: The iterator to reset
1473 * Resets the iterator, so that it will start from the beginning
1474 * again.
1476 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1478 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1480 /* Iterator usage is expected to have record disabled */
1481 if (list_empty(&cpu_buffer->reader_page->list)) {
1482 iter->head_page = cpu_buffer->head_page;
1483 iter->head = cpu_buffer->head_page->read;
1485 iter->head_page = cpu_buffer->reader_page;
1486 iter->head = cpu_buffer->reader_page->read;
1489 iter->read_stamp = cpu_buffer->read_stamp;
1491 iter->read_stamp = iter->head_page->time_stamp;
1495 * ring_buffer_iter_empty - check if an iterator has no more to read
1496 * @iter: The iterator to check
1498 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1500 struct ring_buffer_per_cpu *cpu_buffer;
1502 cpu_buffer = iter->cpu_buffer;
1504 return iter->head_page == cpu_buffer->commit_page &&
1505 iter->head == rb_commit_index(cpu_buffer);
1509 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1510 struct ring_buffer_event *event)
1514 switch (event->type) {
1515 case RINGBUF_TYPE_PADDING:
1518 case RINGBUF_TYPE_TIME_EXTEND:
1519 delta = event->array[0];
1521 delta += event->time_delta;
1522 cpu_buffer->read_stamp += delta;
1525 case RINGBUF_TYPE_TIME_STAMP:
1526 /* FIXME: not implemented */
1529 case RINGBUF_TYPE_DATA:
1530 cpu_buffer->read_stamp += event->time_delta;
1540 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1541 struct ring_buffer_event *event)
1545 switch (event->type) {
1546 case RINGBUF_TYPE_PADDING:
1549 case RINGBUF_TYPE_TIME_EXTEND:
1550 delta = event->array[0];
1552 delta += event->time_delta;
1553 iter->read_stamp += delta;
1556 case RINGBUF_TYPE_TIME_STAMP:
1557 /* FIXME: not implemented */
1560 case RINGBUF_TYPE_DATA:
1561 iter->read_stamp += event->time_delta;
1570 static struct buffer_page *
1571 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1573 struct buffer_page *reader = NULL;
1574 unsigned long flags;
1577 local_irq_save(flags);
1578 __raw_spin_lock(&cpu_buffer->lock);
1582 * This should normally only loop twice. But because the
1583 * start of the reader inserts an empty page, it causes
1584 * a case where we will loop three times. There should be no
1585 * reason to loop four times (that I know of).
1587 if (unlikely(++nr_loops > 3)) {
1588 RB_WARN_ON(cpu_buffer, 1);
1593 reader = cpu_buffer->reader_page;
1595 /* If there's more to read, return this page */
1596 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1599 /* Never should we have an index greater than the size */
1600 RB_WARN_ON(cpu_buffer,
1601 cpu_buffer->reader_page->read > rb_page_size(reader));
1603 /* check if we caught up to the tail */
1605 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1609 * Splice the empty reader page into the list around the head.
1610 * Reset the reader page to size zero.
1613 reader = cpu_buffer->head_page;
1614 cpu_buffer->reader_page->list.next = reader->list.next;
1615 cpu_buffer->reader_page->list.prev = reader->list.prev;
1617 local_set(&cpu_buffer->reader_page->write, 0);
1618 local_set(&cpu_buffer->reader_page->commit, 0);
1620 /* Make the reader page now replace the head */
1621 reader->list.prev->next = &cpu_buffer->reader_page->list;
1622 reader->list.next->prev = &cpu_buffer->reader_page->list;
1625 * If the tail is on the reader, then we must set the head
1626 * to the inserted page, otherwise we set it one before.
1628 cpu_buffer->head_page = cpu_buffer->reader_page;
1630 if (cpu_buffer->commit_page != reader)
1631 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1633 /* Finally update the reader page to the new head */
1634 cpu_buffer->reader_page = reader;
1635 rb_reset_reader_page(cpu_buffer);
1640 __raw_spin_unlock(&cpu_buffer->lock);
1641 local_irq_restore(flags);
1646 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1648 struct ring_buffer_event *event;
1649 struct buffer_page *reader;
1652 reader = rb_get_reader_page(cpu_buffer);
1654 /* This function should not be called when buffer is empty */
1655 RB_WARN_ON_RET(cpu_buffer, !reader);
1657 event = rb_reader_event(cpu_buffer);
1659 if (event->type == RINGBUF_TYPE_DATA)
1660 cpu_buffer->entries--;
1662 rb_update_read_stamp(cpu_buffer, event);
1664 length = rb_event_length(event);
1665 cpu_buffer->reader_page->read += length;
1668 static void rb_advance_iter(struct ring_buffer_iter *iter)
1670 struct ring_buffer *buffer;
1671 struct ring_buffer_per_cpu *cpu_buffer;
1672 struct ring_buffer_event *event;
1675 cpu_buffer = iter->cpu_buffer;
1676 buffer = cpu_buffer->buffer;
1679 * Check if we are at the end of the buffer.
1681 if (iter->head >= rb_page_size(iter->head_page)) {
1682 RB_WARN_ON_RET(buffer,
1683 iter->head_page == cpu_buffer->commit_page);
1688 event = rb_iter_head_event(iter);
1690 length = rb_event_length(event);
1693 * This should not be called to advance the header if we are
1694 * at the tail of the buffer.
1696 RB_WARN_ON_RET(cpu_buffer,
1697 (iter->head_page == cpu_buffer->commit_page) &&
1698 (iter->head + length > rb_commit_index(cpu_buffer)));
1700 rb_update_iter_read_stamp(iter, event);
1702 iter->head += length;
1704 /* check for end of page padding */
1705 if ((iter->head >= rb_page_size(iter->head_page)) &&
1706 (iter->head_page != cpu_buffer->commit_page))
1707 rb_advance_iter(iter);
1711 * ring_buffer_peek - peek at the next event to be read
1712 * @buffer: The ring buffer to read
1713 * @cpu: The cpu to peek at
1714 * @ts: The timestamp counter of this event.
1716 * This will return the event that will be read next, but does
1717 * not consume the data.
1719 struct ring_buffer_event *
1720 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1722 struct ring_buffer_per_cpu *cpu_buffer;
1723 struct ring_buffer_event *event;
1724 struct buffer_page *reader;
1727 if (!cpu_isset(cpu, buffer->cpumask))
1730 cpu_buffer = buffer->buffers[cpu];
1734 * We repeat when a timestamp is encountered. It is possible
1735 * to get multiple timestamps from an interrupt entering just
1736 * as one timestamp is about to be written. The max times
1737 * that this can happen is the number of nested interrupts we
1738 * can have. Nesting 10 deep of interrupts is clearly
1739 * an anomaly.
1741 if (unlikely(++nr_loops > 10)) {
1742 RB_WARN_ON(cpu_buffer, 1);
1746 reader = rb_get_reader_page(cpu_buffer);
1750 event = rb_reader_event(cpu_buffer);
1752 switch (event->type) {
1753 case RINGBUF_TYPE_PADDING:
1754 RB_WARN_ON(cpu_buffer, 1);
1755 rb_advance_reader(cpu_buffer);
1758 case RINGBUF_TYPE_TIME_EXTEND:
1759 /* Internal data, OK to advance */
1760 rb_advance_reader(cpu_buffer);
1763 case RINGBUF_TYPE_TIME_STAMP:
1764 /* FIXME: not implemented */
1765 rb_advance_reader(cpu_buffer);
1768 case RINGBUF_TYPE_DATA:
1770 *ts = cpu_buffer->read_stamp + event->time_delta;
1771 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1783 * ring_buffer_iter_peek - peek at the next event to be read
1784 * @iter: The ring buffer iterator
1785 * @ts: The timestamp counter of this event.
1787 * This will return the event that will be read next, but does
1788 * not increment the iterator.
1790 struct ring_buffer_event *
1791 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1793 struct ring_buffer *buffer;
1794 struct ring_buffer_per_cpu *cpu_buffer;
1795 struct ring_buffer_event *event;
1798 if (ring_buffer_iter_empty(iter))
1801 cpu_buffer = iter->cpu_buffer;
1802 buffer = cpu_buffer->buffer;
1806 * We repeat when a timestamp is encountered. It is possible
1807 * to get multiple timestamps from an interrupt entering just
1808 * as one timestamp is about to be written. The max times
1809 * that this can happen is the number of nested interrupts we
1810 * can have. Nesting 10 deep of interrupts is clearly
1811 * an anomaly.
1813 if (unlikely(++nr_loops > 10)) {
1814 RB_WARN_ON(cpu_buffer, 1);
1818 if (rb_per_cpu_empty(cpu_buffer))
1821 event = rb_iter_head_event(iter);
1823 switch (event->type) {
1824 case RINGBUF_TYPE_PADDING:
1828 case RINGBUF_TYPE_TIME_EXTEND:
1829 /* Internal data, OK to advance */
1830 rb_advance_iter(iter);
1833 case RINGBUF_TYPE_TIME_STAMP:
1834 /* FIXME: not implemented */
1835 rb_advance_iter(iter);
1838 case RINGBUF_TYPE_DATA:
1840 *ts = iter->read_stamp + event->time_delta;
1841 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1853 * ring_buffer_consume - return an event and consume it
1854 * @buffer: The ring buffer to get the next event from
1856 * Returns the next event in the ring buffer, and that event is consumed.
1857 * Meaning that sequential reads will keep returning a different event,
1858 * and eventually empty the ring buffer if the producer is slower.
1860 struct ring_buffer_event *
1861 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1863 struct ring_buffer_per_cpu *cpu_buffer;
1864 struct ring_buffer_event *event;
1866 if (!cpu_isset(cpu, buffer->cpumask))
1869 event = ring_buffer_peek(buffer, cpu, ts);
1873 cpu_buffer = buffer->buffers[cpu];
1874 rb_advance_reader(cpu_buffer);
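/*
 * Illustrative consumer loop (not from the original file): drain everything
 * currently readable on one CPU, oldest event first; process() is a
 * hypothetical callback:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 */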
1880 * ring_buffer_read_start - start a non consuming read of the buffer
1881 * @buffer: The ring buffer to read from
1882 * @cpu: The cpu buffer to iterate over
1884 * This starts up an iteration through the buffer. It also disables
1885 * the recording to the buffer until the reading is finished.
1886 * This prevents the reading from being corrupted. This is not
1887 * a consuming read, so a producer is not expected.
1889 * Must be paired with ring_buffer_read_finish.
1891 struct ring_buffer_iter *
1892 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1894 struct ring_buffer_per_cpu *cpu_buffer;
1895 struct ring_buffer_iter *iter;
1896 unsigned long flags;
1898 if (!cpu_isset(cpu, buffer->cpumask))
1901 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1905 cpu_buffer = buffer->buffers[cpu];
1907 iter->cpu_buffer = cpu_buffer;
1909 atomic_inc(&cpu_buffer->record_disabled);
1910 synchronize_sched();
1912 local_irq_save(flags);
1913 __raw_spin_lock(&cpu_buffer->lock);
1914 ring_buffer_iter_reset(iter);
1915 __raw_spin_unlock(&cpu_buffer->lock);
1916 local_irq_restore(flags);
1922 * ring_buffer_read_finish - finish reading the iterator of the buffer
1923 * @iter: The iterator retrieved by ring_buffer_read_start
1925 * This re-enables the recording to the buffer, and frees the
1926 * iterator.
1929 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1931 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1933 atomic_dec(&cpu_buffer->record_disabled);
1938 * ring_buffer_read - read the next item in the ring buffer by the iterator
1939 * @iter: The ring buffer iterator
1940 * @ts: The time stamp of the event read.
1942 * This reads the next event in the ring buffer and increments the iterator.
1944 struct ring_buffer_event *
1945 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1947 struct ring_buffer_event *event;
1949 event = ring_buffer_iter_peek(iter, ts);
1953 rb_advance_iter(iter);
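/*
 * Illustrative non-consuming read (not from the original file); writes to
 * this CPU buffer stay disabled between read_start and read_finish, and
 * inspect() is a hypothetical callback:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (iter) {
 *		while ((event = ring_buffer_read(iter, &ts)))
 *			inspect(event, ts);
 *		ring_buffer_read_finish(iter);
 *	}
 */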
1959 * ring_buffer_size - return the size of the ring buffer (in bytes)
1960 * @buffer: The ring buffer.
1962 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1964 return BUF_PAGE_SIZE * buffer->pages;
1968 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1970 cpu_buffer->head_page
1971 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1972 local_set(&cpu_buffer->head_page->write, 0);
1973 local_set(&cpu_buffer->head_page->commit, 0);
1975 cpu_buffer->head_page->read = 0;
1977 cpu_buffer->tail_page = cpu_buffer->head_page;
1978 cpu_buffer->commit_page = cpu_buffer->head_page;
1980 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1981 local_set(&cpu_buffer->reader_page->write, 0);
1982 local_set(&cpu_buffer->reader_page->commit, 0);
1983 cpu_buffer->reader_page->read = 0;
1985 cpu_buffer->overrun = 0;
1986 cpu_buffer->entries = 0;
1990 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1991 * @buffer: The ring buffer to reset a per cpu buffer of
1992 * @cpu: The CPU buffer to be reset
1994 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1996 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1997 unsigned long flags;
1999 if (!cpu_isset(cpu, buffer->cpumask))
2002 local_irq_save(flags);
2003 __raw_spin_lock(&cpu_buffer->lock);
2005 rb_reset_cpu(cpu_buffer);
2007 __raw_spin_unlock(&cpu_buffer->lock);
2008 local_irq_restore(flags);
2012 * ring_buffer_reset - reset a ring buffer
2013 * @buffer: The ring buffer to reset all cpu buffers
2015 void ring_buffer_reset(struct ring_buffer *buffer)
2019 for_each_buffer_cpu(buffer, cpu)
2020 ring_buffer_reset_cpu(buffer, cpu);
2024 * ring_buffer_empty - is the ring buffer empty?
2025 * @buffer: The ring buffer to test
2027 int ring_buffer_empty(struct ring_buffer *buffer)
2029 struct ring_buffer_per_cpu *cpu_buffer;
2032 /* yes this is racy, but if you don't like the race, lock the buffer */
2033 for_each_buffer_cpu(buffer, cpu) {
2034 cpu_buffer = buffer->buffers[cpu];
2035 if (!rb_per_cpu_empty(cpu_buffer))
2042 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2043 * @buffer: The ring buffer
2044 * @cpu: The CPU buffer to test
2046 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2048 struct ring_buffer_per_cpu *cpu_buffer;
2050 if (!cpu_isset(cpu, buffer->cpumask))
2053 cpu_buffer = buffer->buffers[cpu];
2054 return rb_per_cpu_empty(cpu_buffer);
2058 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2059 * @buffer_a: One buffer to swap with
2060 * @buffer_b: The other buffer to swap with
2062 * This function is useful for tracers that want to take a "snapshot"
2063 * of a CPU buffer and have another backup buffer lying around.
2064 * It is expected that the tracer handles the cpu buffer not being
2065 * used at the moment.
2067 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2068 struct ring_buffer *buffer_b, int cpu)
2070 struct ring_buffer_per_cpu *cpu_buffer_a;
2071 struct ring_buffer_per_cpu *cpu_buffer_b;
2073 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2074 !cpu_isset(cpu, buffer_b->cpumask))
2077 /* At least make sure the two buffers are somewhat the same */
2078 if (buffer_a->size != buffer_b->size ||
2079 buffer_a->pages != buffer_b->pages)
2082 cpu_buffer_a = buffer_a->buffers[cpu];
2083 cpu_buffer_b = buffer_b->buffers[cpu];
2086 * We can't do a synchronize_sched here because this
2087 * function can be called in atomic context.
2088 * Normally this will be called from the same CPU as cpu.
2089 * If not, it's up to the caller to protect this.
2091 atomic_inc(&cpu_buffer_a->record_disabled);
2092 atomic_inc(&cpu_buffer_b->record_disabled);
2094 buffer_a->buffers[cpu] = cpu_buffer_b;
2095 buffer_b->buffers[cpu] = cpu_buffer_a;
2097 cpu_buffer_b->buffer = buffer_a;
2098 cpu_buffer_a->buffer = buffer_b;
2100 atomic_dec(&cpu_buffer_a->record_disabled);
2101 atomic_dec(&cpu_buffer_b->record_disabled);
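/*
 * Illustrative "snapshot" use (not from the original file): a tracer that
 * keeps a spare buffer of identical geometry can move the live data aside
 * without copying it. live_buffer/spare_buffer are hypothetical, and the
 * zero-on-success return convention is an assumption:
 *
 *	if (ring_buffer_swap_cpu(live_buffer, spare_buffer, cpu) == 0)
 *		read_snapshot(spare_buffer, cpu);	(read_snapshot() is hypothetical)
 */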