/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
/* Global flag to disable all recording to ring buffers */
static int ring_buffers_off __read_mostly;
/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	ring_buffers_off = 0;
}
/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	ring_buffers_off = 1;
}
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_notrace();

	return time;
}
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing of the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
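/*
 * Worked example of the encoding above (not in the original file): a
 * 12-byte payload has event->len = 12 >> RB_ALIGNMENT_SHIFT = 3, so
 * rb_event_length() returns (3 << 2) + RB_EVNT_HDR_SIZE = 16 bytes.
 * A 100-byte payload exceeds RB_MAX_SMALL_DATA (28), so len stays 0,
 * the size is kept in array[0], and the data begins at array[1].
 */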
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}
/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}
/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)
/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 write;		/* index for next write */
	local_t		 commit;	/* committed write index */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	void		*page;		/* Actual data page */
};
/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}
/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
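/*
 * Worked example (not in the original file): with TS_SHIFT = 27 the
 * per-event time_delta field holds at most 2^27 - 1 time units, about
 * 134 ms if the clock counts nanoseconds.  A larger gap between events
 * makes test_time_stamp() return 1, which forces a TIME_EXTEND event
 * carrying the low 27 bits in time_delta and the high bits in array[0].
 */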
#define BUF_PAGE_SIZE PAGE_SIZE
/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned long			size;
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};
#define RB_WARN_ON(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)

#define RB_WARN_ON_RET(buffer, cond)				\
	do {							\
		if (unlikely(cond)) {				\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
			return -1;				\
		}						\
	} while (0)

#define RB_WARN_ON_ONCE(buffer, cond)				\
	do {							\
		static int once;				\
		if (unlikely(cond) && !once) {			\
			once++;					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
	} while (0)
/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);

	list_for_each_entry_safe(page, tmp, head, list) {
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.next->prev != &page->list);
		RB_WARN_ON_RET(cpu_buffer,
			       page->list.prev->next != &page->list);
	}

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->lock);
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}
/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);
/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
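/*
 * A minimal allocation example (an illustrative sketch, not part of the
 * original file; the 64KB size is an arbitrary choice).  Pair every
 * successful ring_buffer_alloc() with ring_buffer_free().
 */
static struct ring_buffer * __maybe_unused example_rb_create(void)
{
	/* size is rounded up to whole pages; at least two pages are kept */
	return ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
}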
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	kfree(buffer);
}
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(&cpu_buffer->pages));
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	BUG_ON(list_empty(&cpu_buffer->pages));

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}
static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		BUG_ON(list_empty(pages));
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		BUG_ON(nr_pages >= buffer->pages);

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM.
	 */
	BUG_ON(nr_pages <= buffer->pages);
	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	BUG_ON(!list_empty(&pages));

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;
}
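/*
 * Example (an illustrative sketch, not part of the original file):
 * growing an idle buffer to one megabyte per CPU.  On success the
 * page-rounded size is returned, otherwise a negative value.
 */
static int __maybe_unused example_rb_grow(struct ring_buffer *buffer)
{
	return ring_buffer_resize(buffer, 1024 * 1024);
}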
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}
static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}
static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}
static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}
/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		BUG_ON(rb_null_event(event));
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}
static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		RB_WARN_ON(cpu_buffer,
			   cpu_buffer->commit_page == cpu_buffer->tail_page);
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->commit, index);
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}
static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}
/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {
	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}
static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
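/*
 * Worked example (not in the original file): a 30-byte payload exceeds
 * RB_MAX_SMALL_DATA, so sizeof(event.array[0]) = 4 is added (34), then
 * the 4-byte header (38), and ALIGN() rounds up to 40 reserved bytes.
 */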
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		spin_lock_irqsave(&cpu_buffer->lock, flags);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		RB_WARN_ON(cpu_buffer, next_page == reader_page);

		/*
		 * If for some reason we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == cpu_buffer->commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment the commit to move on.
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		spin_unlock_irqrestore(&cpu_buffer->lock, flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	BUG_ON(write > BUF_PAGE_SIZE);

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->time_stamp = *ts;

	return event;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
	return NULL;
}
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * timestamp event.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 */
	if (unlikely(++nr_loops > 1000)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed.
			 * But we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}
static DEFINE_PER_CPU(int, rb_need_resched);
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffers_off)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
	return NULL;
}
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}
/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1) {
		if (per_cpu(rb_need_resched, cpu))
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	} else
		preempt_enable_no_resched_notrace();

	return 0;
}
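/*
 * Example of the reserve/commit pairing (an illustrative sketch, not
 * part of the original file): reserve room, fill the body in place,
 * then commit it.
 */
static int __maybe_unused example_rb_reserve_commit(struct ring_buffer *buffer)
{
	struct ring_buffer_event *event;
	unsigned long flags;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body), &flags);
	if (!event)
		return -EBUSY;	/* buffer full or recording disabled */

	body = ring_buffer_event_data(event);
	*body = 42;		/* arbitrary example payload */

	return ring_buffer_unlock_commit(buffer, event, flags);
}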
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffers_off)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();

	return ret;
}
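/*
 * Example (an illustrative sketch, not part of the original file):
 * writing a ready-made payload in one call instead of reserve+commit.
 */
static int __maybe_unused example_rb_write(struct ring_buffer *buffer)
{
	int payload = 42;	/* arbitrary example data */

	return ring_buffer_write(buffer, sizeof(payload), &payload);
}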
static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
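/*
 * Example (an illustrative sketch, not part of the original file) of the
 * intended disable/read/enable pattern: synchronize_sched() waits for
 * writers already inside their preempt-disabled sections to drain.
 */
static void __maybe_unused example_rb_quiesce(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();

	/* ... read or reset the buffer safely here ... */

	ring_buffer_record_enable(buffer);
}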
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->entries;
}
/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->overrun;
}
/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += cpu_buffer->entries;
	}

	return entries;
}
/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->time_stamp;
}
/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
}
static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (unlikely(++nr_loops > 3)) {
		RB_WARN_ON(cpu_buffer, 1);
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	RB_WARN_ON(cpu_buffer,
		   cpu_buffer->reader_page->read > rb_page_size(reader));

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */

	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	BUG_ON(!reader);

	event = rb_reader_event(cpu_buffer);

	if (event->type == RINGBUF_TYPE_DATA)
		cpu_buffer->entries--;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		BUG_ON(iter->head_page == cpu_buffer->commit_page);
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
	       (iter->head + length > rb_commit_index(cpu_buffer)));

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		RB_WARN_ON(cpu_buffer, 1);
		rb_advance_reader(cpu_buffer);
		return NULL;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		rb_inc_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The CPU buffer to consume from
 * @ts: The timestamp counter of this event.
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	event = ring_buffer_peek(buffer, cpu, ts);
	if (!event)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];
	rb_advance_reader(cpu_buffer);

	return event;
}
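/*
 * Example (an illustrative sketch, not part of the original file):
 * draining one CPU's buffer with consuming reads.
 */
static void __maybe_unused example_rb_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);

		/* pr_debug is only for illustration */
		pr_debug("event at %llu: %u bytes at %p\n",
			 (unsigned long long)ts,
			 ring_buffer_event_length(event), data);
	}
}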
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->lock, flags);
	ring_buffer_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);

	return iter;
}
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;

	event = ring_buffer_iter_peek(iter, ts);
	if (!event)
		return NULL;

	rb_advance_iter(iter);

	return event;
}
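/*
 * Example (an illustrative sketch, not part of the original file): a
 * full non-consuming pass.  Recording stays disabled from read_start
 * until read_finish.
 */
static void __maybe_unused example_rb_iterate(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		;	/* inspect the event here */

	ring_buffer_read_finish(iter);
}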
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->overrun = 0;
	cpu_buffer->entries = 0;
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpu_isset(cpu, buffer->cpumask))
		return;

	spin_lock_irqsave(&cpu_buffer->lock, flags);

	rb_reset_cpu(cpu_buffer);

	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
}
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}
	return 1;
}
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpu_isset(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	return rb_per_cpu_empty(cpu_buffer);
}
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;

	if (!cpu_isset(cpu, buffer_a->cpumask) ||
	    !cpu_isset(cpu, buffer_b->cpumask))
		return -EINVAL;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->size != buffer_b->size ||
	    buffer_a->pages != buffer_b->pages)
		return -EINVAL;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	return 0;
}
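/*
 * Example (an illustrative sketch, not part of the original file):
 * taking a "snapshot" by swapping the live CPU buffer with a spare
 * buffer of identical size.
 */
static int __maybe_unused example_rb_snapshot(struct ring_buffer *live,
					      struct ring_buffer *spare)
{
	/* normally called on the CPU whose buffer is being swapped */
	return ring_buffer_swap_cpu(live, spare, raw_smp_processor_id());
}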
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	int *p = filp->private_data;
	char buf[64];
	int r;

	/* !ring_buffers_off == tracing_on */
	r = sprintf(buf, "%d\n", !*p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	int *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* !ring_buffers_off == tracing_on */
	*p = !val;

	(*ppos)++;

	return cnt;
}
static struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};
static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffers_off, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");

	return 0;
}

fs_initcall(rb_init_debugfs);