4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
19 /* Increase this if you want to test TIME_EXTEND events and normalization */
23 u64 ring_buffer_time_stamp(int cpu)
25 /* shift to debug/test normalization and TIME_EXTEND events */
26 return sched_clock() << DEBUG_SHIFT;
29 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
31 /* Just a simple test of the normalize function and deltas */
35 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
36 #define RB_ALIGNMENT_SHIFT 2
37 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
38 #define RB_MAX_SMALL_DATA 28
41 RB_LEN_TIME_EXTEND = 8,
42 RB_LEN_TIME_STAMP = 16,
45 /* inline for ring buffer fast paths */
46 static inline unsigned
47 rb_event_length(struct ring_buffer_event *event)
51 switch (event->type) {
52 case RINGBUF_TYPE_PADDING:
56 case RINGBUF_TYPE_TIME_EXTEND:
57 return RB_LEN_TIME_EXTEND;
59 case RINGBUF_TYPE_TIME_STAMP:
60 return RB_LEN_TIME_STAMP;
62 case RINGBUF_TYPE_DATA:
64 length = event->len << RB_ALIGNMENT_SHIFT;
66 length = event->array[0];
67 return length + RB_EVNT_HDR_SIZE;
76 * ring_buffer_event_length - return the length of the event
77 * @event: the event to get the length of
79 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
81 return rb_event_length(event);
84 /* inline for ring buffer fast paths */
86 rb_event_data(struct ring_buffer_event *event)
88 BUG_ON(event->type != RINGBUF_TYPE_DATA);
89 /* If length is in len field, then array[0] has the data */
91 return (void *)&event->array[0];
92 /* Otherwise length is in array[0] and array[1] has the data */
93 return (void *)&event->array[1];
97 * ring_buffer_event_data - return the data of the event
98 * @event: the event to get the data from
100 void *ring_buffer_event_data(struct ring_buffer_event *event)
102 return rb_event_data(event);
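/*
 * Example (an illustrative sketch, not from this file): given an event
 * returned by one of the read interfaces below, and assuming a
 * hypothetical struct my_entry was written as its payload:
 *
 *	struct my_entry *entry = ring_buffer_event_data(event);
 *
 *	pr_debug("value=%d\n", entry->value);
 *
 * Note that ring_buffer_event_length() returns the full record length,
 * header included, which is mainly useful for accounting.
 */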
105 #define for_each_buffer_cpu(buffer, cpu) \
106 for_each_cpu_mask(cpu, buffer->cpumask)
109 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
110 #define TS_DELTA_TEST (~TS_MASK)
113 * This hack stolen from mm/slob.c.
114 * We can store per-page timing information in the page frame of the page.
115 * Thanks to Peter Zijlstra for suggesting this idea.
120 unsigned long flags; /* mandatory */
121 atomic_t _count; /* mandatory */
122 u64 time_stamp; /* page time stamp */
123 unsigned size; /* size of page data */
124 struct list_head list; /* list of free pages */
131 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing this out.
134 static inline void free_buffer_page(struct buffer_page *bpage)
136 reset_page_mapcount(&bpage->page);
137 bpage->page.mapping = NULL;
138 __free_page(&bpage->page);
142 * We need to fit the time_stamp delta into 27 bits.
144 static inline int test_time_stamp(u64 delta)
146 if (delta & TS_DELTA_TEST)
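/*
 * Worked example (illustrative): timestamps are in nanoseconds, so
 * 27 bits hold a delta of up to 2^27 - 1 ns, roughly 134 ms.  A 1 ms
 * delta (1,000,000 ns) fits and test_time_stamp() returns 0, while a
 * 200 ms delta (200,000,000 ns) has bits above bit 26 set and forces
 * a TIME_EXTEND event on the write path below.
 */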
151 #define BUF_PAGE_SIZE PAGE_SIZE
154 * If head_page == tail_page && head == tail, then the buffer is empty.
156 struct ring_buffer_per_cpu {
158 struct ring_buffer *buffer;
160 struct lock_class_key lock_key;
161 struct list_head pages;
162 unsigned long head; /* read from head */
163 unsigned long tail; /* write to tail */
164 unsigned long reader;
165 struct buffer_page *head_page;
166 struct buffer_page *tail_page;
167 struct buffer_page *reader_page;
168 unsigned long overrun;
169 unsigned long entries;
172 atomic_t record_disabled;
181 atomic_t record_disabled;
185 struct ring_buffer_per_cpu **buffers;
188 struct ring_buffer_iter {
189 struct ring_buffer_per_cpu *cpu_buffer;
191 struct buffer_page *head_page;
195 #define RB_WARN_ON(buffer, cond) \
196 if (unlikely(cond)) { \
197 atomic_inc(&buffer->record_disabled); \
203 * rb_check_pages - integrity check of buffer pages
204 * @cpu_buffer: CPU buffer with pages to test
206 * As a safety measure we check to make sure the data pages have not been corrupted.
209 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
211 struct list_head *head = &cpu_buffer->pages;
212 struct buffer_page *page, *tmp;
214 RB_WARN_ON(cpu_buffer, head->next->prev != head);
215 RB_WARN_ON(cpu_buffer, head->prev->next != head);
217 list_for_each_entry_safe(page, tmp, head, list) {
218 RB_WARN_ON(cpu_buffer, page->list.next->prev != &page->list);
219 RB_WARN_ON(cpu_buffer, page->list.prev->next != &page->list);
225 static unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
227 return cpu_buffer->head_page->size;
230 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
233 struct list_head *head = &cpu_buffer->pages;
234 struct buffer_page *page, *tmp;
239 for (i = 0; i < nr_pages; i++) {
240 addr = __get_free_page(GFP_KERNEL);
243 page = (struct buffer_page *)virt_to_page(addr);
244 list_add(&page->list, &pages);
247 list_splice(&pages, head);
249 rb_check_pages(cpu_buffer);
254 list_for_each_entry_safe(page, tmp, &pages, list) {
255 list_del_init(&page->list);
256 free_buffer_page(page);
261 static struct ring_buffer_per_cpu *
262 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
264 struct ring_buffer_per_cpu *cpu_buffer;
268 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
269 GFP_KERNEL, cpu_to_node(cpu));
273 cpu_buffer->cpu = cpu;
274 cpu_buffer->buffer = buffer;
275 spin_lock_init(&cpu_buffer->lock);
276 INIT_LIST_HEAD(&cpu_buffer->pages);
278 addr = __get_free_page(GFP_KERNEL);
280 goto fail_free_buffer;
281 cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
282 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
283 cpu_buffer->reader_page->size = 0;
285 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
287 goto fail_free_reader;
289 cpu_buffer->head_page
290 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
291 cpu_buffer->tail_page
292 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
297 free_buffer_page(cpu_buffer->reader_page);
304 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
306 struct list_head *head = &cpu_buffer->pages;
307 struct buffer_page *page, *tmp;
309 list_del_init(&cpu_buffer->reader_page->list);
310 free_buffer_page(cpu_buffer->reader_page);
312 list_for_each_entry_safe(page, tmp, head, list) {
313 list_del_init(&page->list);
314 free_buffer_page(page);
320 * Causes compile errors if the struct buffer_page gets bigger
321 * than the struct page.
323 extern int ring_buffer_page_too_big(void);
326 * ring_buffer_alloc - allocate a new ring_buffer
327 * @size: the size in bytes that is needed.
328 * @flags: attributes to set for the ring buffer.
330 * Currently the only flag that is available is the RB_FL_OVERWRITE
331 * flag. This flag means that the buffer will overwrite old data
332 * when the buffer wraps. If this flag is not set, the buffer will
333 * drop data when the tail hits the head.
335 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
337 struct ring_buffer *buffer;
341 /* Paranoid! Optimizes out when all is well */
342 if (sizeof(struct buffer_page) > sizeof(struct page))
343 ring_buffer_page_too_big();
346 /* keep it in its own cache line */
347 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
352 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
353 buffer->flags = flags;
355 /* need at least two pages */
356 if (buffer->pages == 1)
359 buffer->cpumask = cpu_possible_map;
360 buffer->cpus = nr_cpu_ids;
362 bsize = sizeof(void *) * nr_cpu_ids;
363 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
365 if (!buffer->buffers)
366 goto fail_free_buffer;
368 for_each_buffer_cpu(buffer, cpu) {
369 buffer->buffers[cpu] =
370 rb_allocate_cpu_buffer(buffer, cpu);
371 if (!buffer->buffers[cpu])
372 goto fail_free_buffers;
375 mutex_init(&buffer->mutex);
380 for_each_buffer_cpu(buffer, cpu) {
381 if (buffer->buffers[cpu])
382 rb_free_cpu_buffer(buffer->buffers[cpu]);
384 kfree(buffer->buffers);
392 * ring_buffer_free - free a ring buffer.
393 * @buffer: the buffer to free.
396 ring_buffer_free(struct ring_buffer *buffer)
400 for_each_buffer_cpu(buffer, cpu)
401 rb_free_cpu_buffer(buffer->buffers[cpu]);
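/*
 * Example usage of the pair above (an illustrative sketch, not part of
 * this file; the one megabyte size is arbitrary):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */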
406 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
409 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
411 struct buffer_page *page;
415 atomic_inc(&cpu_buffer->record_disabled);
418 for (i = 0; i < nr_pages; i++) {
419 BUG_ON(list_empty(&cpu_buffer->pages));
420 p = cpu_buffer->pages.next;
421 page = list_entry(p, struct buffer_page, list);
422 list_del_init(&page->list);
423 free_buffer_page(page);
425 BUG_ON(list_empty(&cpu_buffer->pages));
427 rb_reset_cpu(cpu_buffer);
429 rb_check_pages(cpu_buffer);
431 atomic_dec(&cpu_buffer->record_disabled);
436 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
437 struct list_head *pages, unsigned nr_pages)
439 struct buffer_page *page;
443 atomic_inc(&cpu_buffer->record_disabled);
446 for (i = 0; i < nr_pages; i++) {
447 BUG_ON(list_empty(pages));
449 page = list_entry(p, struct buffer_page, list);
450 list_del_init(&page->list);
451 list_add_tail(&page->list, &cpu_buffer->pages);
453 rb_reset_cpu(cpu_buffer);
455 rb_check_pages(cpu_buffer);
457 atomic_dec(&cpu_buffer->record_disabled);
461 * ring_buffer_resize - resize the ring buffer
462 * @buffer: the buffer to resize.
463 * @size: the new size.
465 * The tracer is responsible for making sure that the buffer is
466 * not being used while changing the size.
467 * Note: We may be able to change the above requirement by using
468 * RCU synchronizations.
470 * Minimum size is 2 * BUF_PAGE_SIZE.
472 * Returns -1 on failure.
474 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
476 struct ring_buffer_per_cpu *cpu_buffer;
477 unsigned nr_pages, rm_pages, new_pages;
478 struct buffer_page *page, *tmp;
479 unsigned long buffer_size;
484 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
485 size *= BUF_PAGE_SIZE;
486 buffer_size = buffer->pages * BUF_PAGE_SIZE;
488 /* we need a minimum of two pages */
489 if (size < BUF_PAGE_SIZE * 2)
490 size = BUF_PAGE_SIZE * 2;
492 if (size == buffer_size)
495 mutex_lock(&buffer->mutex);
497 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
499 if (size < buffer_size) {
501 /* easy case, just free pages */
502 BUG_ON(nr_pages >= buffer->pages);
504 rm_pages = buffer->pages - nr_pages;
506 for_each_buffer_cpu(buffer, cpu) {
507 cpu_buffer = buffer->buffers[cpu];
508 rb_remove_pages(cpu_buffer, rm_pages);
514 * This is a bit more difficult. We only want to add pages
515 * when we can allocate enough for all CPUs. We do this
516 * by allocating all the pages and storing them on a local
517 * linked list. If we succeed in our allocation, then we
518 * add these pages to the cpu_buffers. Otherwise we just free
519 * them all and return -ENOMEM.
521 BUG_ON(nr_pages <= buffer->pages);
522 new_pages = nr_pages - buffer->pages;
524 for_each_buffer_cpu(buffer, cpu) {
525 for (i = 0; i < new_pages; i++) {
526 addr = __get_free_page(GFP_KERNEL);
529 page = (struct buffer_page *)virt_to_page(addr);
530 list_add(&page->list, &pages);
534 for_each_buffer_cpu(buffer, cpu) {
535 cpu_buffer = buffer->buffers[cpu];
536 rb_insert_pages(cpu_buffer, &pages, new_pages);
539 BUG_ON(!list_empty(&pages));
542 buffer->pages = nr_pages;
543 mutex_unlock(&buffer->mutex);
548 list_for_each_entry_safe(page, tmp, &pages, list) {
549 list_del_init(&page->list);
550 free_buffer_page(page);
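/*
 * Example (an illustrative sketch): grow a quiescent buffer to two
 * megabytes; on failure the old pages are kept:
 *
 *	if (ring_buffer_resize(buffer, 2 * 1024 * 1024) < 0)
 *		printk(KERN_WARNING "ring buffer resize failed\n");
 */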
555 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
557 return (cpu_buffer->reader == cpu_buffer->reader_page->size &&
558 (cpu_buffer->tail_page == cpu_buffer->reader_page ||
559 (cpu_buffer->tail_page == cpu_buffer->head_page &&
560 cpu_buffer->head == cpu_buffer->tail)));
563 static inline int rb_null_event(struct ring_buffer_event *event)
565 return event->type == RINGBUF_TYPE_PADDING;
568 static inline void *rb_page_index(struct buffer_page *page, unsigned index)
570 void *addr = page_address(&page->page);
575 static inline struct ring_buffer_event *
576 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
578 return rb_page_index(cpu_buffer->reader_page,
582 static inline struct ring_buffer_event *
583 rb_iter_head_event(struct ring_buffer_iter *iter)
585 return rb_page_index(iter->head_page,
590 * When the tail hits the head and the buffer is in overwrite mode,
591 * the head jumps to the next page and all content on the previous
592 * page is discarded. But before doing so, we update the overrun
593 * variable of the buffer.
595 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
597 struct ring_buffer_event *event;
600 for (head = 0; head < rb_head_size(cpu_buffer);
601 head += rb_event_length(event)) {
603 event = rb_page_index(cpu_buffer->head_page, head);
604 BUG_ON(rb_null_event(event));
605 /* Only count data entries */
606 if (event->type != RINGBUF_TYPE_DATA)
608 cpu_buffer->overrun++;
609 cpu_buffer->entries--;
613 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
614 struct buffer_page **page)
616 struct list_head *p = (*page)->list.next;
618 if (p == &cpu_buffer->pages)
621 *page = list_entry(p, struct buffer_page, list);
625 rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
627 cpu_buffer->tail_page->time_stamp = *ts;
628 cpu_buffer->write_stamp = *ts;
631 static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer)
633 cpu_buffer->head = 0;
636 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
638 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
639 cpu_buffer->reader = 0;
642 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
644 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
647 * The iterator could be on the reader page (it starts there).
648 * But the head could have moved, since the reader was
649 * found. Check for this case and assign the iterator
650 * to the head page instead of next.
652 if (iter->head_page == cpu_buffer->reader_page)
653 iter->head_page = cpu_buffer->head_page;
655 rb_inc_page(cpu_buffer, &iter->head_page);
657 iter->read_stamp = iter->head_page->time_stamp;
662 * ring_buffer_update_event - update event type and data
663 * @event: the event to update
664 * @type: the type of event
665 * @length: the size of the event field in the ring buffer
667 * Update the type and data fields of the event. The length
668 * is the actual size that is written to the ring buffer,
669 * and with this, we can determine what to place into the data field.
673 rb_update_event(struct ring_buffer_event *event,
674 unsigned type, unsigned length)
680 case RINGBUF_TYPE_PADDING:
683 case RINGBUF_TYPE_TIME_EXTEND:
685 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
686 >> RB_ALIGNMENT_SHIFT;
689 case RINGBUF_TYPE_TIME_STAMP:
691 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
692 >> RB_ALIGNMENT_SHIFT;
695 case RINGBUF_TYPE_DATA:
696 length -= RB_EVNT_HDR_SIZE;
697 if (length > RB_MAX_SMALL_DATA) {
699 event->array[0] = length;
702 (length + (RB_ALIGNMENT-1))
703 >> RB_ALIGNMENT_SHIFT;
710 static inline unsigned rb_calculate_event_length(unsigned length)
712 struct ring_buffer_event event; /* Used only for sizeof array */
714 /* zero length can cause confusion */
718 if (length > RB_MAX_SMALL_DATA)
719 length += sizeof(event.array[0]);
721 length += RB_EVNT_HDR_SIZE;
722 length = ALIGN(length, RB_ALIGNMENT);
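/*
 * Worked example (illustrative): a 10 byte payload stays in the small
 * case: 10 + 4 (header) = 14, aligned up to 16.  A 40 byte payload
 * exceeds RB_MAX_SMALL_DATA, so 4 bytes for array[0] are added as
 * well: 40 + 4 + 4 = 48, which is already aligned.
 */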
727 static struct ring_buffer_event *
728 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
729 unsigned type, unsigned long length, u64 *ts)
731 struct buffer_page *tail_page, *head_page, *reader_page;
733 struct ring_buffer *buffer = cpu_buffer->buffer;
734 struct ring_buffer_event *event;
736 /* No locking needed for tail page */
737 tail_page = cpu_buffer->tail_page;
738 tail = cpu_buffer->tail;
740 if (tail + length > BUF_PAGE_SIZE) {
741 struct buffer_page *next_page = tail_page;
743 spin_lock(&cpu_buffer->lock);
744 rb_inc_page(cpu_buffer, &next_page);
746 head_page = cpu_buffer->head_page;
747 reader_page = cpu_buffer->reader_page;
749 /* we grabbed the lock before incrementing */
750 WARN_ON(next_page == reader_page);
752 if (next_page == head_page) {
753 if (!(buffer->flags & RB_FL_OVERWRITE)) {
754 spin_unlock(&cpu_buffer->lock);
758 /* count overflows */
759 rb_update_overflow(cpu_buffer);
761 rb_inc_page(cpu_buffer, &head_page);
762 cpu_buffer->head_page = head_page;
763 rb_reset_head_page(cpu_buffer);
766 if (tail != BUF_PAGE_SIZE) {
767 event = rb_page_index(tail_page, tail);
769 event->type = RINGBUF_TYPE_PADDING;
772 tail_page->size = tail;
773 tail_page = next_page;
776 cpu_buffer->tail_page = tail_page;
777 cpu_buffer->tail = tail;
778 rb_add_stamp(cpu_buffer, ts);
779 spin_unlock(&cpu_buffer->lock);
782 BUG_ON(tail + length > BUF_PAGE_SIZE);
784 event = rb_page_index(tail_page, tail);
785 rb_update_event(event, type, length);
791 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
794 struct ring_buffer_event *event;
797 if (unlikely(*delta > (1ULL << 59) && !once++)) {
798 printk(KERN_WARNING "Delta way too big! %llu"
799 " ts=%llu write stamp = %llu\n",
800 *delta, *ts, cpu_buffer->write_stamp);
805 * The delta is too big, we need to add a timestamp.
808 event = __rb_reserve_next(cpu_buffer,
809 RINGBUF_TYPE_TIME_EXTEND,
815 /* check to see if we went to the next page */
816 if (cpu_buffer->tail) {
817 /* Still on same page, update timestamp */
818 event->time_delta = *delta & TS_MASK;
819 event->array[0] = *delta >> TS_SHIFT;
820 /* commit the time event */
822 cpu_buffer->tail += rb_event_length(event);
823 cpu_buffer->write_stamp = *ts;
830 static struct ring_buffer_event *
831 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
832 unsigned type, unsigned long length)
834 struct ring_buffer_event *event;
837 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
839 if (cpu_buffer->tail) {
840 delta = ts - cpu_buffer->write_stamp;
842 if (test_time_stamp(delta)) {
845 ret = rb_add_time_stamp(cpu_buffer, &ts, &delta);
850 spin_lock(&cpu_buffer->lock);
851 rb_add_stamp(cpu_buffer, &ts);
852 spin_unlock(&cpu_buffer->lock);
856 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
860 /* If the reserve went to the next page, our delta is zero */
861 if (!cpu_buffer->tail)
864 event->time_delta = delta;
870 * ring_buffer_lock_reserve - reserve a part of the buffer
871 * @buffer: the ring buffer to reserve from
872 * @length: the length of the data to reserve (excluding event header)
873 * @flags: a pointer to save the interrupt flags
875 * Returns a reserved event on the ring buffer to copy directly to.
876 * The user of this interface will need to get the body to write into
877 * and can use the ring_buffer_event_data() interface.
879 * The length is the length of the data needed, not the event length
880 * which also includes the event header.
882 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
883 * If NULL is returned, then nothing has been allocated or locked.
885 struct ring_buffer_event *
886 ring_buffer_lock_reserve(struct ring_buffer *buffer,
887 unsigned long length,
888 unsigned long *flags)
890 struct ring_buffer_per_cpu *cpu_buffer;
891 struct ring_buffer_event *event;
894 if (atomic_read(&buffer->record_disabled))
897 local_irq_save(*flags);
898 cpu = raw_smp_processor_id();
900 if (!cpu_isset(cpu, buffer->cpumask))
903 cpu_buffer = buffer->buffers[cpu];
905 if (atomic_read(&cpu_buffer->record_disabled))
908 length = rb_calculate_event_length(length);
909 if (length > BUF_PAGE_SIZE)
912 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
919 local_irq_restore(*flags);
923 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
924 struct ring_buffer_event *event)
926 cpu_buffer->tail += rb_event_length(event);
927 cpu_buffer->tail_page->size = cpu_buffer->tail;
928 cpu_buffer->write_stamp += event->time_delta;
929 cpu_buffer->entries++;
933 * ring_buffer_unlock_commit - commit a reserved event
934 * @buffer: The buffer to commit to
935 * @event: The event pointer to commit.
936 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
938 * This commits the data to the ring buffer, and releases any locks held.
940 * Must be paired with ring_buffer_lock_reserve.
942 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
943 struct ring_buffer_event *event,
946 struct ring_buffer_per_cpu *cpu_buffer;
947 int cpu = raw_smp_processor_id();
949 cpu_buffer = buffer->buffers[cpu];
951 rb_commit(cpu_buffer, event);
953 local_irq_restore(flags);
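/*
 * Example of the reserve/commit pair (an illustrative sketch; the
 * struct my_entry payload type is hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */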
959 * ring_buffer_write - write data to the buffer without reserving
960 * @buffer: The ring buffer to write to.
961 * @length: The length of the data being written (excluding the event header)
962 * @data: The data to write to the buffer.
964 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
965 * one function. If you already have the data to write to the buffer, it
966 * may be easier to simply call this function.
968 * Note, like ring_buffer_lock_reserve, the length is the length of the data
969 * and not the length of the event which would hold the header.
971 int ring_buffer_write(struct ring_buffer *buffer,
972 unsigned long length,
975 struct ring_buffer_per_cpu *cpu_buffer;
976 struct ring_buffer_event *event;
977 unsigned long event_length, flags;
982 if (atomic_read(&buffer->record_disabled))
985 local_irq_save(flags);
986 cpu = raw_smp_processor_id();
988 if (!cpu_isset(cpu, buffer->cpumask))
991 cpu_buffer = buffer->buffers[cpu];
993 if (atomic_read(&cpu_buffer->record_disabled))
996 event_length = rb_calculate_event_length(length);
997 event = rb_reserve_next_event(cpu_buffer,
998 RINGBUF_TYPE_DATA, event_length);
1002 body = rb_event_data(event);
1004 memcpy(body, data, length);
1006 rb_commit(cpu_buffer, event);
1010 local_irq_restore(flags);
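/*
 * Example (an illustrative sketch): writing an existing object in one
 * call instead of using the reserve/commit pair; a non-zero return
 * means the write did not happen:
 *
 *	struct my_entry entry = { .value = 42 };	(hypothetical type)
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		printk(KERN_WARNING "ring buffer write failed\n");
 */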
1016 * ring_buffer_record_disable - stop all writes into the buffer
1017 * @buffer: The ring buffer to stop writes to.
1019 * This prevents all writes to the buffer. Any attempt to write
1020 * to the buffer after this will fail and return NULL.
1022 * The caller should call synchronize_sched() after this.
1024 void ring_buffer_record_disable(struct ring_buffer *buffer)
1026 atomic_inc(&buffer->record_disabled);
1030 * ring_buffer_record_enable - enable writes to the buffer
1031 * @buffer: The ring buffer to enable writes
1033 * Note, multiple disables will need the same number of enables
1034 * to truly enable the writing (much like preempt_disable).
1036 void ring_buffer_record_enable(struct ring_buffer *buffer)
1038 atomic_dec(&buffer->record_disabled);
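/*
 * Example pairing (an illustrative sketch): quiesce all writers before
 * a non-consuming read, then re-enable them:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read the buffer ...
 *	ring_buffer_record_enable(buffer);
 */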
1042 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1043 * @buffer: The ring buffer to stop writes to.
1044 * @cpu: The CPU buffer to stop
1046 * This prevents all writes to the buffer. Any attempt to write
1047 * to the buffer after this will fail and return NULL.
1049 * The caller should call synchronize_sched() after this.
1051 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1053 struct ring_buffer_per_cpu *cpu_buffer;
1055 if (!cpu_isset(cpu, buffer->cpumask))
1058 cpu_buffer = buffer->buffers[cpu];
1059 atomic_inc(&cpu_buffer->record_disabled);
1063 * ring_buffer_record_enable_cpu - enable writes to the buffer
1064 * @buffer: The ring buffer to enable writes
1065 * @cpu: The CPU to enable.
1067 * Note, multiple disables will need the same number of enables
1068 * to truly enable the writing (much like preempt_disable).
1070 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1072 struct ring_buffer_per_cpu *cpu_buffer;
1074 if (!cpu_isset(cpu, buffer->cpumask))
1077 cpu_buffer = buffer->buffers[cpu];
1078 atomic_dec(&cpu_buffer->record_disabled);
1082 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1083 * @buffer: The ring buffer
1084 * @cpu: The per CPU buffer to get the entries from.
1086 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1088 struct ring_buffer_per_cpu *cpu_buffer;
1090 if (!cpu_isset(cpu, buffer->cpumask))
1093 cpu_buffer = buffer->buffers[cpu];
1094 return cpu_buffer->entries;
1098 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1099 * @buffer: The ring buffer
1100 * @cpu: The per CPU buffer to get the number of overruns from
1102 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1104 struct ring_buffer_per_cpu *cpu_buffer;
1106 if (!cpu_isset(cpu, buffer->cpumask))
1109 cpu_buffer = buffer->buffers[cpu];
1110 return cpu_buffer->overrun;
1114 * ring_buffer_entries - get the number of entries in a buffer
1115 * @buffer: The ring buffer
1117 * Returns the total number of entries in the ring buffer
1120 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1122 struct ring_buffer_per_cpu *cpu_buffer;
1123 unsigned long entries = 0;
1126 /* if you care about this being correct, lock the buffer */
1127 for_each_buffer_cpu(buffer, cpu) {
1128 cpu_buffer = buffer->buffers[cpu];
1129 entries += cpu_buffer->entries;
1136 * ring_buffer_overruns - get the number of overruns in the buffer
1137 * @buffer: The ring buffer
1139 * Returns the total number of overruns in the ring buffer
1142 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1144 struct ring_buffer_per_cpu *cpu_buffer;
1145 unsigned long overruns = 0;
1148 /* if you care about this being correct, lock the buffer */
1149 for_each_buffer_cpu(buffer, cpu) {
1150 cpu_buffer = buffer->buffers[cpu];
1151 overruns += cpu_buffer->overrun;
1158 * ring_buffer_iter_reset - reset an iterator
1159 * @iter: The iterator to reset
1161 * Resets the iterator, so that it will start from the beginning again.
1164 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1166 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1168 /* Iterator usage is expected to have record disabled */
1169 if (list_empty(&cpu_buffer->reader_page->list)) {
1170 iter->head_page = cpu_buffer->head_page;
1171 iter->head = cpu_buffer->head;
1173 iter->head_page = cpu_buffer->reader_page;
1174 iter->head = cpu_buffer->reader;
1177 iter->read_stamp = cpu_buffer->read_stamp;
1179 iter->read_stamp = iter->head_page->time_stamp;
1183 * ring_buffer_iter_empty - check if an iterator has no more to read
1184 * @iter: The iterator to check
1186 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1188 struct ring_buffer_per_cpu *cpu_buffer;
1190 cpu_buffer = iter->cpu_buffer;
1192 return iter->head_page == cpu_buffer->tail_page &&
1193 iter->head == cpu_buffer->tail;
1197 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1198 struct ring_buffer_event *event)
1202 switch (event->type) {
1203 case RINGBUF_TYPE_PADDING:
1206 case RINGBUF_TYPE_TIME_EXTEND:
1207 delta = event->array[0];
1209 delta += event->time_delta;
1210 cpu_buffer->read_stamp += delta;
1213 case RINGBUF_TYPE_TIME_STAMP:
1214 /* FIXME: not implemented */
1217 case RINGBUF_TYPE_DATA:
1218 cpu_buffer->read_stamp += event->time_delta;
1228 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1229 struct ring_buffer_event *event)
1233 switch (event->type) {
1234 case RINGBUF_TYPE_PADDING:
1237 case RINGBUF_TYPE_TIME_EXTEND:
1238 delta = event->array[0];
1240 delta += event->time_delta;
1241 iter->read_stamp += delta;
1244 case RINGBUF_TYPE_TIME_STAMP:
1245 /* FIXME: not implemented */
1248 case RINGBUF_TYPE_DATA:
1249 iter->read_stamp += event->time_delta;
1258 static struct buffer_page *
1259 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1261 struct buffer_page *reader = NULL;
1262 unsigned long flags;
1264 spin_lock_irqsave(&cpu_buffer->lock, flags);
1267 reader = cpu_buffer->reader_page;
1269 /* If there's more to read, return this page */
1270 if (cpu_buffer->reader < reader->size)
1273 /* Never should we have an index greater than the size */
1274 WARN_ON(cpu_buffer->reader > reader->size);
1276 /* check if we caught up to the tail */
1278 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1282 * Splice the empty reader page into the list around the head.
1283 * Reset the reader page to size zero.
1286 reader = cpu_buffer->head_page;
1287 cpu_buffer->reader_page->list.next = reader->list.next;
1288 cpu_buffer->reader_page->list.prev = reader->list.prev;
1289 cpu_buffer->reader_page->size = 0;
1291 /* Make the reader page now replace the head */
1292 reader->list.prev->next = &cpu_buffer->reader_page->list;
1293 reader->list.next->prev = &cpu_buffer->reader_page->list;
1296 * If the tail is on the reader, then we must set the head
1297 * to the inserted page, otherwise we set it one before.
1299 cpu_buffer->head_page = cpu_buffer->reader_page;
1301 if (cpu_buffer->tail_page != reader)
1302 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1304 /* Finally update the reader page to the new head */
1305 cpu_buffer->reader_page = reader;
1306 rb_reset_reader_page(cpu_buffer);
1311 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1316 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1318 struct ring_buffer_event *event;
1319 struct buffer_page *reader;
1322 reader = rb_get_reader_page(cpu_buffer);
1324 /* This function should not be called when buffer is empty */
1327 event = rb_reader_event(cpu_buffer);
1329 if (event->type == RINGBUF_TYPE_DATA)
1330 cpu_buffer->entries--;
1332 rb_update_read_stamp(cpu_buffer, event);
1334 length = rb_event_length(event);
1335 cpu_buffer->reader += length;
1338 static void rb_advance_iter(struct ring_buffer_iter *iter)
1340 struct ring_buffer *buffer;
1341 struct ring_buffer_per_cpu *cpu_buffer;
1342 struct ring_buffer_event *event;
1345 cpu_buffer = iter->cpu_buffer;
1346 buffer = cpu_buffer->buffer;
1349 * Check if we are at the end of the buffer.
1351 if (iter->head >= iter->head_page->size) {
1352 BUG_ON(iter->head_page == cpu_buffer->tail_page);
1357 event = rb_iter_head_event(iter);
1359 length = rb_event_length(event);
1362 * This should not be called to advance the iterator if we are
1363 * at the tail of the buffer.
1365 BUG_ON((iter->head_page == cpu_buffer->tail_page) &&
1366 (iter->head + length > cpu_buffer->tail));
1368 rb_update_iter_read_stamp(iter, event);
1370 iter->head += length;
1372 /* check for end of page padding */
1373 if ((iter->head >= iter->head_page->size) &&
1374 (iter->head_page != cpu_buffer->tail_page))
1375 rb_advance_iter(iter);
1379 * ring_buffer_peek - peek at the next event to be read
1380 * @buffer: The ring buffer to read
1381 * @cpu: The cpu to peek at
1382 * @ts: The timestamp counter of this event.
1384 * This will return the event that will be read next, but does
1385 * not consume the data.
1387 struct ring_buffer_event *
1388 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1390 struct ring_buffer_per_cpu *cpu_buffer;
1391 struct ring_buffer_event *event;
1392 struct buffer_page *reader;
1394 if (!cpu_isset(cpu, buffer->cpumask))
1397 cpu_buffer = buffer->buffers[cpu];
1400 reader = rb_get_reader_page(cpu_buffer);
1404 event = rb_reader_event(cpu_buffer);
1406 switch (event->type) {
1407 case RINGBUF_TYPE_PADDING:
1409 rb_advance_reader(cpu_buffer);
1412 case RINGBUF_TYPE_TIME_EXTEND:
1413 /* Internal data, OK to advance */
1414 rb_advance_reader(cpu_buffer);
1417 case RINGBUF_TYPE_TIME_STAMP:
1418 /* FIXME: not implemented */
1419 rb_advance_reader(cpu_buffer);
1422 case RINGBUF_TYPE_DATA:
1424 *ts = cpu_buffer->read_stamp + event->time_delta;
1425 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1437 * ring_buffer_iter_peek - peek at the next event to be read
1438 * @iter: The ring buffer iterator
1439 * @ts: The timestamp counter of this event.
1441 * This will return the event that will be read next, but does
1442 * not increment the iterator.
1444 struct ring_buffer_event *
1445 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1447 struct ring_buffer *buffer;
1448 struct ring_buffer_per_cpu *cpu_buffer;
1449 struct ring_buffer_event *event;
1451 if (ring_buffer_iter_empty(iter))
1454 cpu_buffer = iter->cpu_buffer;
1455 buffer = cpu_buffer->buffer;
1458 if (rb_per_cpu_empty(cpu_buffer))
1461 event = rb_iter_head_event(iter);
1463 switch (event->type) {
1464 case RINGBUF_TYPE_PADDING:
1468 case RINGBUF_TYPE_TIME_EXTEND:
1469 /* Internal data, OK to advance */
1470 rb_advance_iter(iter);
1473 case RINGBUF_TYPE_TIME_STAMP:
1474 /* FIXME: not implemented */
1475 rb_advance_iter(iter);
1478 case RINGBUF_TYPE_DATA:
1480 *ts = iter->read_stamp + event->time_delta;
1481 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1493 * ring_buffer_consume - return an event and consume it
1494 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp in
1496 * Returns the next event in the ring buffer, and that event is consumed.
1497 * Meaning that sequential reads will keep returning different events,
1498 * and eventually empty the ring buffer if the producer is slower.
1500 struct ring_buffer_event *
1501 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1503 struct ring_buffer_per_cpu *cpu_buffer;
1504 struct ring_buffer_event *event;
1506 if (!cpu_isset(cpu, buffer->cpumask))
1509 event = ring_buffer_peek(buffer, cpu, ts);
1513 cpu_buffer = buffer->buffers[cpu];
1514 rb_advance_reader(cpu_buffer);
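/*
 * Example consumer loop (an illustrative sketch; process() is a
 * hypothetical callback):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 */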
1520 * ring_buffer_read_start - start a non consuming read of the buffer
1521 * @buffer: The ring buffer to read from
1522 * @cpu: The cpu buffer to iterate over
1524 * This starts up an iteration through the buffer. It also disables
1525 * the recording to the buffer until the reading is finished.
1526 * This prevents the reading from being corrupted. This is not
1527 * a consuming read, so a producer is not expected.
1529 * Must be paired with ring_buffer_read_finish.
1531 struct ring_buffer_iter *
1532 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1534 struct ring_buffer_per_cpu *cpu_buffer;
1535 struct ring_buffer_iter *iter;
1536 unsigned long flags;
1538 if (!cpu_isset(cpu, buffer->cpumask))
1541 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1545 cpu_buffer = buffer->buffers[cpu];
1547 iter->cpu_buffer = cpu_buffer;
1549 atomic_inc(&cpu_buffer->record_disabled);
1550 synchronize_sched();
1552 spin_lock_irqsave(&cpu_buffer->lock, flags);
1553 ring_buffer_iter_reset(iter);
1554 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1560 * ring_buffer_finish - finish reading the iterator of the buffer
1561 * @iter: The iterator retrieved by ring_buffer_read_start
1563 * This re-enables the recording to the buffer, and frees the iterator.
1567 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1569 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1571 atomic_dec(&cpu_buffer->record_disabled);
1576 * ring_buffer_read - read the next item in the ring buffer by the iterator
1577 * @iter: The ring buffer iterator
1578 * @ts: The time stamp of the event read.
1580 * This reads the next event in the ring buffer and increments the iterator.
1582 struct ring_buffer_event *
1583 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1585 struct ring_buffer_event *event;
1587 event = ring_buffer_iter_peek(iter, ts);
1591 rb_advance_iter(iter);
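/*
 * Example non-consuming read (an illustrative sketch):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		... inspect the event, writers stay disabled ...
 *	ring_buffer_read_finish(iter);
 */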
1597 * ring_buffer_size - return the size of the ring buffer (in bytes)
1598 * @buffer: The ring buffer.
1600 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1602 return BUF_PAGE_SIZE * buffer->pages;
1606 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1608 cpu_buffer->head_page
1609 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1610 cpu_buffer->head_page->size = 0;
1611 cpu_buffer->tail_page = cpu_buffer->head_page;
1612 cpu_buffer->tail_page->size = 0;
1613 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1614 cpu_buffer->reader_page->size = 0;
1616 cpu_buffer->head = cpu_buffer->tail = cpu_buffer->reader = 0;
1618 cpu_buffer->overrun = 0;
1619 cpu_buffer->entries = 0;
1623 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1624 * @buffer: The ring buffer to reset a per cpu buffer of
1625 * @cpu: The CPU buffer to be reset
1627 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1629 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1630 unsigned long flags;
1632 if (!cpu_isset(cpu, buffer->cpumask))
1635 spin_lock_irqsave(&cpu_buffer->lock, flags);
1637 rb_reset_cpu(cpu_buffer);
1639 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1643 * ring_buffer_reset - reset a ring buffer
1644 * @buffer: The ring buffer to reset all cpu buffers
1646 void ring_buffer_reset(struct ring_buffer *buffer)
1650 for_each_buffer_cpu(buffer, cpu)
1651 ring_buffer_reset_cpu(buffer, cpu);
1655 * ring_buffer_empty - is the ring buffer empty?
1656 * @buffer: The ring buffer to test
1658 int ring_buffer_empty(struct ring_buffer *buffer)
1660 struct ring_buffer_per_cpu *cpu_buffer;
1663 /* yes this is racy, but if you don't like the race, lock the buffer */
1664 for_each_buffer_cpu(buffer, cpu) {
1665 cpu_buffer = buffer->buffers[cpu];
1666 if (!rb_per_cpu_empty(cpu_buffer))
1673 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
1674 * @buffer: The ring buffer
1675 * @cpu: The CPU buffer to test
1677 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
1679 struct ring_buffer_per_cpu *cpu_buffer;
1681 if (!cpu_isset(cpu, buffer->cpumask))
1684 cpu_buffer = buffer->buffers[cpu];
1685 return rb_per_cpu_empty(cpu_buffer);
1689 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
1690 * @buffer_a: One buffer to swap with
1691 * @buffer_b: The other buffer to swap with
1693 * This function is useful for tracers that want to take a "snapshot"
1694 * of a CPU buffer and have another backup buffer lying around.
1695 * It is expected that the tracer handles the cpu buffer not being
1696 * used at the moment.
1698 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
1699 struct ring_buffer *buffer_b, int cpu)
1701 struct ring_buffer_per_cpu *cpu_buffer_a;
1702 struct ring_buffer_per_cpu *cpu_buffer_b;
1704 if (!cpu_isset(cpu, buffer_a->cpumask) ||
1705 !cpu_isset(cpu, buffer_b->cpumask))
1708 /* At least make sure the two buffers are somewhat the same */
1709 if (buffer_a->size != buffer_b->size ||
1710 buffer_a->pages != buffer_b->pages)
1713 cpu_buffer_a = buffer_a->buffers[cpu];
1714 cpu_buffer_b = buffer_b->buffers[cpu];
1717 * We can't do a synchronize_sched here because this
1718 * function can be called in atomic context.
1719 * Normally this will be called from the same CPU as cpu.
1720 * If not, it's up to the caller to protect this.
1722 atomic_inc(&cpu_buffer_a->record_disabled);
1723 atomic_inc(&cpu_buffer_b->record_disabled);
1725 buffer_a->buffers[cpu] = cpu_buffer_b;
1726 buffer_b->buffers[cpu] = cpu_buffer_a;
1728 cpu_buffer_b->buffer = buffer_a;
1729 cpu_buffer_a->buffer = buffer_b;
1731 atomic_dec(&cpu_buffer_a->record_disabled);
1732 atomic_dec(&cpu_buffer_b->record_disabled);
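/*
 * Example snapshot (an illustrative sketch): swap the live CPU buffer
 * with a spare one, then read the snapshot at leisure with the
 * iterator interface above; a non-zero return means the swap was
 * refused:
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu))
 *		return;
 *	... read snap_buffer's per cpu data for that cpu ...
 */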