4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
25 * Turning this switch on makes it OK to write to the
26 * ring buffer, if the ring buffer itself is enabled.
28 * There are three layers that must be on in order to write
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
35 * In case of an anomaly, this global flag has a bit set that
36 * will permanently disable all ring buffers.
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
60 static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
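/*
 * Illustrative sketch, not part of the original file: how the write paths
 * below interpret the two bits. Recording is only allowed in the "1 0"
 * state from the table above; both "off" and "permanently disabled" make
 * the flags word differ from RB_BUFFERS_ON, which is exactly the test
 * that ring_buffer_lock_reserve() and ring_buffer_write() perform.
 * rb_recording_is_on_example() is a hypothetical helper name used only
 * here.
 */
static inline int rb_recording_is_on_example(void)
{
	/* any state other than plain RB_BUFFERS_ON refuses writes */
	return ring_buffer_flags == RB_BUFFERS_ON;
}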
63 * tracing_on - enable all tracing buffers
65 * This function enables all tracing buffers that may have been
66 * disabled with tracing_off.
70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
74 * tracing_off - turn off all tracing buffers
76 * This function stops all tracing buffers from recording data.
77 * It does not disable any overhead the tracers themselves may
78 * be causing. This function simply causes all recording to
79 * the ring buffers to fail.
81 void tracing_off(void)
83 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
87 * tracing_off_permanent - permanently disable ring buffers
89 * This function, once called, will disable all ring buffers
92 void tracing_off_permanent(void)
94 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
99 /* Up this if you want to test the TIME_EXTENTS and normalization */
100 #define DEBUG_SHIFT 0
103 u64 ring_buffer_time_stamp(int cpu)
107 preempt_disable_notrace();
108 /* shift to debug/test normalization and TIME_EXTENTS */
109 time = sched_clock() << DEBUG_SHIFT;
110 preempt_enable_no_resched_notrace();
115 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
117 /* Just stupid testing the normalize function and deltas */
121 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122 #define RB_ALIGNMENT_SHIFT 2
123 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
124 #define RB_MAX_SMALL_DATA 28
127 RB_LEN_TIME_EXTEND = 8,
128 RB_LEN_TIME_STAMP = 16,
131 /* inline for ring buffer fast paths */
132 static inline unsigned
133 rb_event_length(struct ring_buffer_event *event)
137 switch (event->type) {
138 case RINGBUF_TYPE_PADDING:
142 case RINGBUF_TYPE_TIME_EXTEND:
143 return RB_LEN_TIME_EXTEND;
145 case RINGBUF_TYPE_TIME_STAMP:
146 return RB_LEN_TIME_STAMP;
148 case RINGBUF_TYPE_DATA:
150 length = event->len << RB_ALIGNMENT_SHIFT;
152 length = event->array[0];
153 return length + RB_EVNT_HDR_SIZE;
162 * ring_buffer_event_length - return the length of the event
163 * @event: the event to get the length of
165 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
167 return rb_event_length(event);
170 /* inline for ring buffer fast paths */
172 rb_event_data(struct ring_buffer_event *event)
174 BUG_ON(event->type != RINGBUF_TYPE_DATA);
175 /* If length is in len field, then array[0] has the data */
177 return (void *)&event->array[0];
178 /* Otherwise length is in array[0] and array[1] has the data */
179 return (void *)&event->array[1];
183 * ring_buffer_event_data - return the data of the event
184 * @event: the event to get the data from
186 void *ring_buffer_event_data(struct ring_buffer_event *event)
188 return rb_event_data(event);
191 #define for_each_buffer_cpu(buffer, cpu) \
192 for_each_cpu_mask(cpu, buffer->cpumask)
195 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
196 #define TS_DELTA_TEST (~TS_MASK)
198 struct buffer_data_page {
199 u64 time_stamp; /* page time stamp */
200 local_t commit; /* write committed index */
201 unsigned char data[]; /* data of buffer page */
205 local_t write; /* index for next write */
206 unsigned read; /* index for next read */
207 struct list_head list; /* list of free pages */
208 struct buffer_data_page *page; /* Actual data page */
211 static void rb_init_page(struct buffer_data_page *bpage)
213 local_set(&bpage->commit, 0);
217 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
220 static inline void free_buffer_page(struct buffer_page *bpage)
223 free_page((unsigned long)bpage->page);
228 * We need to fit the time_stamp delta into 27 bits.
230 static inline int test_time_stamp(u64 delta)
232 if (delta & TS_DELTA_TEST)
237 #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
240 * head_page == tail_page && head == tail then buffer is empty.
242 struct ring_buffer_per_cpu {
244 struct ring_buffer *buffer;
245 spinlock_t reader_lock; /* serialize readers */
247 struct lock_class_key lock_key;
248 struct list_head pages;
249 struct buffer_page *head_page; /* read from head */
250 struct buffer_page *tail_page; /* write to tail */
251 struct buffer_page *commit_page; /* committed pages */
252 struct buffer_page *reader_page;
253 unsigned long overrun;
254 unsigned long entries;
257 atomic_t record_disabled;
265 atomic_t record_disabled;
269 struct ring_buffer_per_cpu **buffers;
272 struct ring_buffer_iter {
273 struct ring_buffer_per_cpu *cpu_buffer;
275 struct buffer_page *head_page;
279 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
280 #define RB_WARN_ON(buffer, cond) \
282 int _____ret = unlikely(cond); \
284 atomic_inc(&buffer->record_disabled); \
291 * check_pages - integrity check of buffer pages
292 * @cpu_buffer: CPU buffer with pages to test
294 * As a safety measure we check to make sure the data pages have not
297 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
299 struct list_head *head = &cpu_buffer->pages;
300 struct buffer_page *bpage, *tmp;
302 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
304 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
307 list_for_each_entry_safe(bpage, tmp, head, list) {
308 if (RB_WARN_ON(cpu_buffer,
309 bpage->list.next->prev != &bpage->list))
311 if (RB_WARN_ON(cpu_buffer,
312 bpage->list.prev->next != &bpage->list))
319 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
322 struct list_head *head = &cpu_buffer->pages;
323 struct buffer_page *bpage, *tmp;
328 for (i = 0; i < nr_pages; i++) {
329 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
330 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
333 list_add(&bpage->list, &pages);
335 addr = __get_free_page(GFP_KERNEL);
338 bpage->page = (void *)addr;
339 rb_init_page(bpage->page);
342 list_splice(&pages, head);
344 rb_check_pages(cpu_buffer);
349 list_for_each_entry_safe(bpage, tmp, &pages, list) {
350 list_del_init(&bpage->list);
351 free_buffer_page(bpage);
356 static struct ring_buffer_per_cpu *
357 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
359 struct ring_buffer_per_cpu *cpu_buffer;
360 struct buffer_page *bpage;
364 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
365 GFP_KERNEL, cpu_to_node(cpu));
369 cpu_buffer->cpu = cpu;
370 cpu_buffer->buffer = buffer;
371 spin_lock_init(&cpu_buffer->reader_lock);
372 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
373 INIT_LIST_HEAD(&cpu_buffer->pages);
375 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
376 GFP_KERNEL, cpu_to_node(cpu));
378 goto fail_free_buffer;
380 cpu_buffer->reader_page = bpage;
381 addr = __get_free_page(GFP_KERNEL);
383 goto fail_free_reader;
384 bpage->page = (void *)addr;
385 rb_init_page(bpage->page);
387 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
389 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
391 goto fail_free_reader;
393 cpu_buffer->head_page
394 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
395 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
400 free_buffer_page(cpu_buffer->reader_page);
407 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
409 struct list_head *head = &cpu_buffer->pages;
410 struct buffer_page *bpage, *tmp;
412 list_del_init(&cpu_buffer->reader_page->list);
413 free_buffer_page(cpu_buffer->reader_page);
415 list_for_each_entry_safe(bpage, tmp, head, list) {
416 list_del_init(&bpage->list);
417 free_buffer_page(bpage);
423 * Causes compile errors if the struct buffer_page gets bigger
424 * than the struct page.
426 extern int ring_buffer_page_too_big(void);
429 * ring_buffer_alloc - allocate a new ring_buffer
430 * @size: the size in bytes that is needed.
431 * @flags: attributes to set for the ring buffer.
433 * Currently the only flag that is available is the RB_FL_OVERWRITE
434 * flag. This flag means that the buffer will overwrite old data
435 * when the buffer wraps. If this flag is not set, the buffer will
436 * drop data when the tail hits the head.
438 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
440 struct ring_buffer *buffer;
444 /* Paranoid! Optimizes out when all is well */
445 if (sizeof(struct buffer_page) > sizeof(struct page))
446 ring_buffer_page_too_big();
449 /* keep it in its own cache line */
450 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
455 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
456 buffer->flags = flags;
458 /* need at least two pages */
459 if (buffer->pages == 1)
462 buffer->cpumask = cpu_possible_map;
463 buffer->cpus = nr_cpu_ids;
465 bsize = sizeof(void *) * nr_cpu_ids;
466 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
468 if (!buffer->buffers)
469 goto fail_free_buffer;
471 for_each_buffer_cpu(buffer, cpu) {
472 buffer->buffers[cpu] =
473 rb_allocate_cpu_buffer(buffer, cpu);
474 if (!buffer->buffers[cpu])
475 goto fail_free_buffers;
478 mutex_init(&buffer->mutex);
483 for_each_buffer_cpu(buffer, cpu) {
484 if (buffer->buffers[cpu])
485 rb_free_cpu_buffer(buffer->buffers[cpu]);
487 kfree(buffer->buffers);
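/*
 * Usage sketch (illustrative only, not part of the original file): allocate
 * a buffer with room for roughly a megabyte of events and free it again.
 * The requested size is rounded up to whole buffer pages, and at least two
 * pages are always used. example_buffer_setup/teardown are made-up names.
 */
static struct ring_buffer *example_buffer;

static int example_buffer_setup(void)
{
	example_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!example_buffer)
		return -ENOMEM;
	return 0;
}

static void example_buffer_teardown(void)
{
	ring_buffer_free(example_buffer);
	example_buffer = NULL;
}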
495 * ring_buffer_free - free a ring buffer.
496 * @buffer: the buffer to free.
499 ring_buffer_free(struct ring_buffer *buffer)
503 for_each_buffer_cpu(buffer, cpu)
504 rb_free_cpu_buffer(buffer->buffers[cpu]);
509 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
512 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
514 struct buffer_page *bpage;
518 atomic_inc(&cpu_buffer->record_disabled);
521 for (i = 0; i < nr_pages; i++) {
522 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
524 p = cpu_buffer->pages.next;
525 bpage = list_entry(p, struct buffer_page, list);
526 list_del_init(&bpage->list);
527 free_buffer_page(bpage);
529 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
532 rb_reset_cpu(cpu_buffer);
534 rb_check_pages(cpu_buffer);
536 atomic_dec(&cpu_buffer->record_disabled);
541 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
542 struct list_head *pages, unsigned nr_pages)
544 struct buffer_page *bpage;
548 atomic_inc(&cpu_buffer->record_disabled);
551 for (i = 0; i < nr_pages; i++) {
552 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
555 bpage = list_entry(p, struct buffer_page, list);
556 list_del_init(&bpage->list);
557 list_add_tail(&bpage->list, &cpu_buffer->pages);
559 rb_reset_cpu(cpu_buffer);
561 rb_check_pages(cpu_buffer);
563 atomic_dec(&cpu_buffer->record_disabled);
567 * ring_buffer_resize - resize the ring buffer
568 * @buffer: the buffer to resize.
569 * @size: the new size.
571 * The tracer is responsible for making sure that the buffer is
572 * not being used while changing the size.
573 * Note: We may be able to change the above requirement by using
574 * RCU synchronizations.
576 * Minimum size is 2 * BUF_PAGE_SIZE.
578 * Returns -1 on failure.
580 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
582 struct ring_buffer_per_cpu *cpu_buffer;
583 unsigned nr_pages, rm_pages, new_pages;
584 struct buffer_page *bpage, *tmp;
585 unsigned long buffer_size;
591 * Always succeed at resizing a non-existent buffer:
596 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
597 size *= BUF_PAGE_SIZE;
598 buffer_size = buffer->pages * BUF_PAGE_SIZE;
600 /* we need a minimum of two pages */
601 if (size < BUF_PAGE_SIZE * 2)
602 size = BUF_PAGE_SIZE * 2;
604 if (size == buffer_size)
607 mutex_lock(&buffer->mutex);
609 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
611 if (size < buffer_size) {
613 /* easy case, just free pages */
614 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
615 mutex_unlock(&buffer->mutex);
619 rm_pages = buffer->pages - nr_pages;
621 for_each_buffer_cpu(buffer, cpu) {
622 cpu_buffer = buffer->buffers[cpu];
623 rb_remove_pages(cpu_buffer, rm_pages);
629 * This is a bit more difficult. We only want to add pages
630 * when we can allocate enough for all CPUs. We do this
631 * by allocating all the pages and storing them on a local
632 * linked list. If we succeed in our allocation, then we
633 * add these pages to the cpu_buffers. Otherwise we just free
634 * them all and return -ENOMEM;
636 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
637 mutex_unlock(&buffer->mutex);
641 new_pages = nr_pages - buffer->pages;
643 for_each_buffer_cpu(buffer, cpu) {
644 for (i = 0; i < new_pages; i++) {
645 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
647 GFP_KERNEL, cpu_to_node(cpu));
650 list_add(&bpage->list, &pages);
651 addr = __get_free_page(GFP_KERNEL);
654 bpage->page = (void *)addr;
655 rb_init_page(bpage->page);
659 for_each_buffer_cpu(buffer, cpu) {
660 cpu_buffer = buffer->buffers[cpu];
661 rb_insert_pages(cpu_buffer, &pages, new_pages);
664 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
665 mutex_unlock(&buffer->mutex);
670 buffer->pages = nr_pages;
671 mutex_unlock(&buffer->mutex);
676 list_for_each_entry_safe(bpage, tmp, &pages, list) {
677 list_del_init(&bpage->list);
678 free_buffer_page(bpage);
680 mutex_unlock(&buffer->mutex);
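/*
 * Usage sketch (illustrative, not part of the original file): double the
 * size of an idle buffer. As the comment above ring_buffer_resize() states,
 * the caller must make sure nothing is using the buffer while it resizes.
 */
static int example_buffer_grow(struct ring_buffer *buffer)
{
	unsigned long new_size = ring_buffer_size(buffer) * 2;

	return ring_buffer_resize(buffer, new_size) < 0 ? -ENOMEM : 0;
}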
684 static inline int rb_null_event(struct ring_buffer_event *event)
686 return event->type == RINGBUF_TYPE_PADDING;
690 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
692 return bpage->data + index;
695 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
697 return bpage->page->data + index;
700 static inline struct ring_buffer_event *
701 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
703 return __rb_page_index(cpu_buffer->reader_page,
704 cpu_buffer->reader_page->read);
707 static inline struct ring_buffer_event *
708 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
710 return __rb_page_index(cpu_buffer->head_page,
711 cpu_buffer->head_page->read);
714 static inline struct ring_buffer_event *
715 rb_iter_head_event(struct ring_buffer_iter *iter)
717 return __rb_page_index(iter->head_page, iter->head);
720 static inline unsigned rb_page_write(struct buffer_page *bpage)
722 return local_read(&bpage->write);
725 static inline unsigned rb_page_commit(struct buffer_page *bpage)
727 return local_read(&bpage->page->commit);
730 /* Size is determined by what has been committed */
731 static inline unsigned rb_page_size(struct buffer_page *bpage)
733 return rb_page_commit(bpage);
736 static inline unsigned
737 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
739 return rb_page_commit(cpu_buffer->commit_page);
742 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
744 return rb_page_commit(cpu_buffer->head_page);
748 * When the tail hits the head and the buffer is in overwrite mode,
749 * the head jumps to the next page and all content on the previous
750 * page is discarded. But before doing so, we update the overrun
751 * variable of the buffer.
753 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
755 struct ring_buffer_event *event;
758 for (head = 0; head < rb_head_size(cpu_buffer);
759 head += rb_event_length(event)) {
761 event = __rb_page_index(cpu_buffer->head_page, head);
762 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
764 /* Only count data entries */
765 if (event->type != RINGBUF_TYPE_DATA)
767 cpu_buffer->overrun++;
768 cpu_buffer->entries--;
772 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
773 struct buffer_page **bpage)
775 struct list_head *p = (*bpage)->list.next;
777 if (p == &cpu_buffer->pages)
780 *bpage = list_entry(p, struct buffer_page, list);
783 static inline unsigned
784 rb_event_index(struct ring_buffer_event *event)
786 unsigned long addr = (unsigned long)event;
788 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
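/*
 * Illustrative numbers (assuming a 64-bit build, where the buffer_data_page
 * header above is 16 bytes): PAGE_SIZE - BUF_PAGE_SIZE equals that header
 * size, so an event located 0x30 bytes into its page has index
 * 0x30 - 16 = 32 into the page's data[] array.
 */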
792 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
793 struct ring_buffer_event *event)
795 unsigned long addr = (unsigned long)event;
798 index = rb_event_index(event);
801 return cpu_buffer->commit_page->page == (void *)addr &&
802 rb_commit_index(cpu_buffer) == index;
806 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
807 struct ring_buffer_event *event)
809 unsigned long addr = (unsigned long)event;
812 index = rb_event_index(event);
815 while (cpu_buffer->commit_page->page != (void *)addr) {
816 if (RB_WARN_ON(cpu_buffer,
817 cpu_buffer->commit_page == cpu_buffer->tail_page))
819 cpu_buffer->commit_page->page->commit =
820 cpu_buffer->commit_page->write;
821 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
822 cpu_buffer->write_stamp =
823 cpu_buffer->commit_page->page->time_stamp;
826 /* Now set the commit to the event's index */
827 local_set(&cpu_buffer->commit_page->page->commit, index);
831 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
834 * We only race with interrupts and NMIs on this CPU.
835 * If we own the commit event, then we can commit
836 * all others that interrupted us, since the interruptions
837 * are in stack format (they finish before they come
838 * back to us). This allows us to do a simple loop to
839 * assign the commit to the tail.
842 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
843 cpu_buffer->commit_page->page->commit =
844 cpu_buffer->commit_page->write;
845 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
846 cpu_buffer->write_stamp =
847 cpu_buffer->commit_page->page->time_stamp;
848 /* add barrier to keep gcc from optimizing too much */
851 while (rb_commit_index(cpu_buffer) !=
852 rb_page_write(cpu_buffer->commit_page)) {
853 cpu_buffer->commit_page->page->commit =
854 cpu_buffer->commit_page->write;
858 /* again, keep gcc from optimizing */
862 * If an interrupt came in just after the first while loop
863 * and pushed the tail page forward, we will be left with
864 * a dangling commit that will never go forward.
866 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
870 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
872 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
873 cpu_buffer->reader_page->read = 0;
876 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
878 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
881 * The iterator could be on the reader page (it starts there).
882 * But the head could have moved, since the reader was
883 * found. Check for this case and assign the iterator
884 * to the head page instead of next.
886 if (iter->head_page == cpu_buffer->reader_page)
887 iter->head_page = cpu_buffer->head_page;
889 rb_inc_page(cpu_buffer, &iter->head_page);
891 iter->read_stamp = iter->head_page->page->time_stamp;
896 * ring_buffer_update_event - update event type and data
897 * @event: the event to update
898 * @type: the type of event
899 * @length: the size of the event field in the ring buffer
901 * Update the type and data fields of the event. The length
902 * is the actual size that is written to the ring buffer,
903 * and with this, we can determine what to place into the
907 rb_update_event(struct ring_buffer_event *event,
908 unsigned type, unsigned length)
914 case RINGBUF_TYPE_PADDING:
917 case RINGBUF_TYPE_TIME_EXTEND:
919 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
920 >> RB_ALIGNMENT_SHIFT;
923 case RINGBUF_TYPE_TIME_STAMP:
925 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
926 >> RB_ALIGNMENT_SHIFT;
929 case RINGBUF_TYPE_DATA:
930 length -= RB_EVNT_HDR_SIZE;
931 if (length > RB_MAX_SMALL_DATA) {
933 event->array[0] = length;
936 (length + (RB_ALIGNMENT-1))
937 >> RB_ALIGNMENT_SHIFT;
944 static inline unsigned rb_calculate_event_length(unsigned length)
946 struct ring_buffer_event event; /* Used only for sizeof array */
948 /* zero length can cause confusion */
952 if (length > RB_MAX_SMALL_DATA)
953 length += sizeof(event.array[0]);
955 length += RB_EVNT_HDR_SIZE;
956 length = ALIGN(length, RB_ALIGNMENT);
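/*
 * Worked example (illustrative, not part of the original file): with a
 * 4 byte event header, 4 byte alignment and RB_MAX_SMALL_DATA of 28, a
 * 10 byte payload reserves ALIGN(4 + 10, 4) = 16 bytes, while a 40 byte
 * payload no longer fits in the len field, so an extra array[0] length
 * word is added and ALIGN(4 + 4 + 40, 4) = 48 bytes are reserved.
 */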
961 static struct ring_buffer_event *
962 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
963 unsigned type, unsigned long length, u64 *ts)
965 struct buffer_page *tail_page, *head_page, *reader_page;
966 unsigned long tail, write;
967 struct ring_buffer *buffer = cpu_buffer->buffer;
968 struct ring_buffer_event *event;
971 tail_page = cpu_buffer->tail_page;
972 write = local_add_return(length, &tail_page->write);
973 tail = write - length;
975 /* See if we shot past the end of this buffer page */
976 if (write > BUF_PAGE_SIZE) {
977 struct buffer_page *next_page = tail_page;
979 local_irq_save(flags);
980 __raw_spin_lock(&cpu_buffer->lock);
982 rb_inc_page(cpu_buffer, &next_page);
984 head_page = cpu_buffer->head_page;
985 reader_page = cpu_buffer->reader_page;
987 /* we grabbed the lock before incrementing */
988 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
992 * If for some reason, we had an interrupt storm that made
993 * it all the way around the buffer, bail, and warn
996 if (unlikely(next_page == cpu_buffer->commit_page)) {
1001 if (next_page == head_page) {
1002 if (!(buffer->flags & RB_FL_OVERWRITE)) {
1004 if (tail <= BUF_PAGE_SIZE)
1005 local_set(&tail_page->write, tail);
1009 /* tail_page has not moved yet? */
1010 if (tail_page == cpu_buffer->tail_page) {
1011 /* count overflows */
1012 rb_update_overflow(cpu_buffer);
1014 rb_inc_page(cpu_buffer, &head_page);
1015 cpu_buffer->head_page = head_page;
1016 cpu_buffer->head_page->read = 0;
1021 * If the tail page is still the same as what we think
1022 * it is, then it is up to us to update the tail
1025 if (tail_page == cpu_buffer->tail_page) {
1026 local_set(&next_page->write, 0);
1027 local_set(&next_page->page->commit, 0);
1028 cpu_buffer->tail_page = next_page;
1030 /* reread the time stamp */
1031 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1032 cpu_buffer->tail_page->page->time_stamp = *ts;
1036 * The actual tail page has moved forward.
1038 if (tail < BUF_PAGE_SIZE) {
1039 /* Mark the rest of the page with padding */
1040 event = __rb_page_index(tail_page, tail);
1041 event->type = RINGBUF_TYPE_PADDING;
1044 if (tail <= BUF_PAGE_SIZE)
1045 /* Set the write back to the previous setting */
1046 local_set(&tail_page->write, tail);
1049 * If this was a commit entry that failed,
1050 * increment that too
1052 if (tail_page == cpu_buffer->commit_page &&
1053 tail == rb_commit_index(cpu_buffer)) {
1054 rb_set_commit_to_write(cpu_buffer);
1057 __raw_spin_unlock(&cpu_buffer->lock);
1058 local_irq_restore(flags);
1060 /* fail and let the caller try again */
1061 return ERR_PTR(-EAGAIN);
1064 /* We reserved something on the buffer */
1066 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1069 event = __rb_page_index(tail_page, tail);
1070 rb_update_event(event, type, length);
1073 * If this is a commit and the tail is zero, then update
1074 * this page's time stamp.
1076 if (!tail && rb_is_commit(cpu_buffer, event))
1077 cpu_buffer->commit_page->page->time_stamp = *ts;
1082 __raw_spin_unlock(&cpu_buffer->lock);
1083 local_irq_restore(flags);
1088 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1089 u64 *ts, u64 *delta)
1091 struct ring_buffer_event *event;
1095 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1096 printk(KERN_WARNING "Delta way too big! %llu"
1097 " ts=%llu write stamp = %llu\n",
1098 (unsigned long long)*delta,
1099 (unsigned long long)*ts,
1100 (unsigned long long)cpu_buffer->write_stamp);
1105 * The delta is too big, we need to add a
1108 event = __rb_reserve_next(cpu_buffer,
1109 RINGBUF_TYPE_TIME_EXTEND,
1115 if (PTR_ERR(event) == -EAGAIN)
1118 /* Only a committed time event can update the write stamp */
1119 if (rb_is_commit(cpu_buffer, event)) {
1121 * If this is the first on the page, then we need to
1122 * update the page itself, and just put in a zero.
1124 if (rb_event_index(event)) {
1125 event->time_delta = *delta & TS_MASK;
1126 event->array[0] = *delta >> TS_SHIFT;
1128 cpu_buffer->commit_page->page->time_stamp = *ts;
1129 event->time_delta = 0;
1130 event->array[0] = 0;
1132 cpu_buffer->write_stamp = *ts;
1133 /* let the caller know this was the commit */
1136 /* Darn, this is just wasted space */
1137 event->time_delta = 0;
1138 event->array[0] = 0;
1147 static struct ring_buffer_event *
1148 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1149 unsigned type, unsigned long length)
1151 struct ring_buffer_event *event;
1158 * We allow for interrupts to reenter here and do a trace.
1159 * If one does, it will cause this original code to loop
1160 * back here. Even with heavy interrupts happening, this
1161 * should only happen a few times in a row. If this happens
1162 * 1000 times in a row, there must be either an interrupt
1163 * storm or we have something buggy.
1166 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1169 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1172 * Only the first commit can update the timestamp.
1173 * Yes there is a race here. If an interrupt comes in
1174 * just after the conditional and it traces too, then it
1175 * will also check the deltas. More than one timestamp may
1176 * also be made. But only the entry that did the actual
1177 * commit will be something other than zero.
1179 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1180 rb_page_write(cpu_buffer->tail_page) ==
1181 rb_commit_index(cpu_buffer)) {
1183 delta = ts - cpu_buffer->write_stamp;
1185 /* make sure this delta is calculated here */
1188 /* Did the write stamp get updated already? */
1189 if (unlikely(ts < cpu_buffer->write_stamp))
1192 if (test_time_stamp(delta)) {
1194 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1196 if (commit == -EBUSY)
1199 if (commit == -EAGAIN)
1202 RB_WARN_ON(cpu_buffer, commit < 0);
1205 /* Non commits have zero deltas */
1208 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1209 if (PTR_ERR(event) == -EAGAIN)
1213 if (unlikely(commit))
1215 * Ouch! We needed a timestamp and it was committed. But
1216 * we didn't get our event reserved.
1218 rb_set_commit_to_write(cpu_buffer);
1223 * If the timestamp was committed, make the commit our entry
1224 * now so that we will update it when needed.
1227 rb_set_commit_event(cpu_buffer, event);
1228 else if (!rb_is_commit(cpu_buffer, event))
1231 event->time_delta = delta;
1236 static DEFINE_PER_CPU(int, rb_need_resched);
1239 * ring_buffer_lock_reserve - reserve a part of the buffer
1240 * @buffer: the ring buffer to reserve from
1241 * @length: the length of the data to reserve (excluding event header)
1242 * @flags: a pointer to save the interrupt flags
1244 * Returns a reserved event on the ring buffer to copy directly to.
1245 * The user of this interface will need to get the body to write into
1246 * and can use the ring_buffer_event_data() interface.
1248 * The length is the length of the data needed, not the event length
1249 * which also includes the event header.
1251 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1252 * If NULL is returned, then nothing has been allocated or locked.
1254 struct ring_buffer_event *
1255 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1256 unsigned long length,
1257 unsigned long *flags)
1259 struct ring_buffer_per_cpu *cpu_buffer;
1260 struct ring_buffer_event *event;
1263 if (ring_buffer_flags != RB_BUFFERS_ON)
1266 if (atomic_read(&buffer->record_disabled))
1269 /* If we are tracing schedule, we don't want to recurse */
1270 resched = ftrace_preempt_disable();
1272 cpu = raw_smp_processor_id();
1274 if (!cpu_isset(cpu, buffer->cpumask))
1277 cpu_buffer = buffer->buffers[cpu];
1279 if (atomic_read(&cpu_buffer->record_disabled))
1282 length = rb_calculate_event_length(length);
1283 if (length > BUF_PAGE_SIZE)
1286 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1291 * Need to store resched state on this cpu.
1292 * Only the first needs to.
1295 if (preempt_count() == 1)
1296 per_cpu(rb_need_resched, cpu) = resched;
1301 ftrace_preempt_enable(resched);
1305 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1306 struct ring_buffer_event *event)
1308 cpu_buffer->entries++;
1310 /* Only process further if we own the commit */
1311 if (!rb_is_commit(cpu_buffer, event))
1314 cpu_buffer->write_stamp += event->time_delta;
1316 rb_set_commit_to_write(cpu_buffer);
1320 * ring_buffer_unlock_commit - commit a reserved
1321 * @buffer: The buffer to commit to
1322 * @event: The event pointer to commit.
1323 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1325 * This commits the data to the ring buffer, and releases any locks held.
1327 * Must be paired with ring_buffer_lock_reserve.
1329 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1330 struct ring_buffer_event *event,
1331 unsigned long flags)
1333 struct ring_buffer_per_cpu *cpu_buffer;
1334 int cpu = raw_smp_processor_id();
1336 cpu_buffer = buffer->buffers[cpu];
1338 rb_commit(cpu_buffer, event);
1341 * Only the last preempt count needs to restore preemption.
1343 if (preempt_count() == 1)
1344 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1346 preempt_enable_no_resched_notrace();
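/*
 * Usage sketch (illustrative only, not part of the original file): reserve
 * space for a small record, fill it in through ring_buffer_event_data(),
 * then commit it. "struct example_entry" is a made-up payload type.
 */
struct example_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

static int example_trace(struct ring_buffer *buffer,
			 unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;
	unsigned long flags;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	return ring_buffer_unlock_commit(buffer, event, flags);
}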
1352 * ring_buffer_write - write data to the buffer without reserving
1353 * @buffer: The ring buffer to write to.
1354 * @length: The length of the data being written (excluding the event header)
1355 * @data: The data to write to the buffer.
1357 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1358 * one function. If you already have the data to write to the buffer, it
1359 * may be easier to simply call this function.
1361 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1362 * and not the length of the event which would hold the header.
1364 int ring_buffer_write(struct ring_buffer *buffer,
1365 unsigned long length,
1368 struct ring_buffer_per_cpu *cpu_buffer;
1369 struct ring_buffer_event *event;
1370 unsigned long event_length;
1375 if (ring_buffer_flags != RB_BUFFERS_ON)
1378 if (atomic_read(&buffer->record_disabled))
1381 resched = ftrace_preempt_disable();
1383 cpu = raw_smp_processor_id();
1385 if (!cpu_isset(cpu, buffer->cpumask))
1388 cpu_buffer = buffer->buffers[cpu];
1390 if (atomic_read(&cpu_buffer->record_disabled))
1393 event_length = rb_calculate_event_length(length);
1394 event = rb_reserve_next_event(cpu_buffer,
1395 RINGBUF_TYPE_DATA, event_length);
1399 body = rb_event_data(event);
1401 memcpy(body, data, length);
1403 rb_commit(cpu_buffer, event);
1407 ftrace_preempt_enable(resched);
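/*
 * Usage sketch (illustrative): when the data already exists in a local
 * buffer, ring_buffer_write() above does the reserve, copy and commit in
 * a single call.
 */
static int example_log_string(struct ring_buffer *buffer, const char *msg)
{
	/* include the terminating NUL so readers get a proper string */
	return ring_buffer_write(buffer, strlen(msg) + 1, (void *)msg);
}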
1412 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1414 struct buffer_page *reader = cpu_buffer->reader_page;
1415 struct buffer_page *head = cpu_buffer->head_page;
1416 struct buffer_page *commit = cpu_buffer->commit_page;
1418 return reader->read == rb_page_commit(reader) &&
1419 (commit == reader ||
1421 head->read == rb_page_commit(commit)));
1425 * ring_buffer_record_disable - stop all writes into the buffer
1426 * @buffer: The ring buffer to stop writes to.
1428 * This prevents all writes to the buffer. Any attempt to write
1429 * to the buffer after this will fail and return NULL.
1431 * The caller should call synchronize_sched() after this.
1433 void ring_buffer_record_disable(struct ring_buffer *buffer)
1435 atomic_inc(&buffer->record_disabled);
1439 * ring_buffer_record_enable - enable writes to the buffer
1440 * @buffer: The ring buffer to enable writes
1442 * Note, multiple disables will need the same number of enables
1443 * to truly enable the writing (much like preempt_disable).
1445 void ring_buffer_record_enable(struct ring_buffer *buffer)
1447 atomic_dec(&buffer->record_disabled);
1451 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1452 * @buffer: The ring buffer to stop writes to.
1453 * @cpu: The CPU buffer to stop
1455 * This prevents all writes to the buffer. Any attempt to write
1456 * to the buffer after this will fail and return NULL.
1458 * The caller should call synchronize_sched() after this.
1460 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1462 struct ring_buffer_per_cpu *cpu_buffer;
1464 if (!cpu_isset(cpu, buffer->cpumask))
1467 cpu_buffer = buffer->buffers[cpu];
1468 atomic_inc(&cpu_buffer->record_disabled);
1472 * ring_buffer_record_enable_cpu - enable writes to the buffer
1473 * @buffer: The ring buffer to enable writes
1474 * @cpu: The CPU to enable.
1476 * Note, multiple disables will need the same number of enables
1477 * to truly enable the writing (much like preempt_disable).
1479 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1481 struct ring_buffer_per_cpu *cpu_buffer;
1483 if (!cpu_isset(cpu, buffer->cpumask))
1486 cpu_buffer = buffer->buffers[cpu];
1487 atomic_dec(&cpu_buffer->record_disabled);
1491 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1492 * @buffer: The ring buffer
1493 * @cpu: The per CPU buffer to get the entries from.
1495 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1497 struct ring_buffer_per_cpu *cpu_buffer;
1499 if (!cpu_isset(cpu, buffer->cpumask))
1502 cpu_buffer = buffer->buffers[cpu];
1503 return cpu_buffer->entries;
1507 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1508 * @buffer: The ring buffer
1509 * @cpu: The per CPU buffer to get the number of overruns from
1511 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1513 struct ring_buffer_per_cpu *cpu_buffer;
1515 if (!cpu_isset(cpu, buffer->cpumask))
1518 cpu_buffer = buffer->buffers[cpu];
1519 return cpu_buffer->overrun;
1523 * ring_buffer_entries - get the number of entries in a buffer
1524 * @buffer: The ring buffer
1526 * Returns the total number of entries in the ring buffer
1529 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1531 struct ring_buffer_per_cpu *cpu_buffer;
1532 unsigned long entries = 0;
1535 /* if you care about this being correct, lock the buffer */
1536 for_each_buffer_cpu(buffer, cpu) {
1537 cpu_buffer = buffer->buffers[cpu];
1538 entries += cpu_buffer->entries;
1545 * ring_buffer_overruns - get the total number of overruns in the buffer
1546 * @buffer: The ring buffer
1548 * Returns the total number of overruns in the ring buffer
1551 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1553 struct ring_buffer_per_cpu *cpu_buffer;
1554 unsigned long overruns = 0;
1557 /* if you care about this being correct, lock the buffer */
1558 for_each_buffer_cpu(buffer, cpu) {
1559 cpu_buffer = buffer->buffers[cpu];
1560 overruns += cpu_buffer->overrun;
1566 static void rb_iter_reset(struct ring_buffer_iter *iter)
1568 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1570 /* Iterator usage is expected to have record disabled */
1571 if (list_empty(&cpu_buffer->reader_page->list)) {
1572 iter->head_page = cpu_buffer->head_page;
1573 iter->head = cpu_buffer->head_page->read;
1575 iter->head_page = cpu_buffer->reader_page;
1576 iter->head = cpu_buffer->reader_page->read;
1579 iter->read_stamp = cpu_buffer->read_stamp;
1581 iter->read_stamp = iter->head_page->page->time_stamp;
1585 * ring_buffer_iter_reset - reset an iterator
1586 * @iter: The iterator to reset
1588 * Resets the iterator, so that it will start from the beginning
1591 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1593 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1594 unsigned long flags;
1596 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1597 rb_iter_reset(iter);
1598 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1602 * ring_buffer_iter_empty - check if an iterator has no more to read
1603 * @iter: The iterator to check
1605 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1607 struct ring_buffer_per_cpu *cpu_buffer;
1609 cpu_buffer = iter->cpu_buffer;
1611 return iter->head_page == cpu_buffer->commit_page &&
1612 iter->head == rb_commit_index(cpu_buffer);
1616 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1617 struct ring_buffer_event *event)
1621 switch (event->type) {
1622 case RINGBUF_TYPE_PADDING:
1625 case RINGBUF_TYPE_TIME_EXTEND:
1626 delta = event->array[0];
1628 delta += event->time_delta;
1629 cpu_buffer->read_stamp += delta;
1632 case RINGBUF_TYPE_TIME_STAMP:
1633 /* FIXME: not implemented */
1636 case RINGBUF_TYPE_DATA:
1637 cpu_buffer->read_stamp += event->time_delta;
1647 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1648 struct ring_buffer_event *event)
1652 switch (event->type) {
1653 case RINGBUF_TYPE_PADDING:
1656 case RINGBUF_TYPE_TIME_EXTEND:
1657 delta = event->array[0];
1659 delta += event->time_delta;
1660 iter->read_stamp += delta;
1663 case RINGBUF_TYPE_TIME_STAMP:
1664 /* FIXME: not implemented */
1667 case RINGBUF_TYPE_DATA:
1668 iter->read_stamp += event->time_delta;
1677 static struct buffer_page *
1678 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1680 struct buffer_page *reader = NULL;
1681 unsigned long flags;
1684 local_irq_save(flags);
1685 __raw_spin_lock(&cpu_buffer->lock);
1689 * This should normally only loop twice. But because the
1690 * start of the reader inserts an empty page, it causes
1691 * a case where we will loop three times. There should be no
1692 * reason to loop four times (that I know of).
1694 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1699 reader = cpu_buffer->reader_page;
1701 /* If there's more to read, return this page */
1702 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1705 /* Never should we have an index greater than the size */
1706 if (RB_WARN_ON(cpu_buffer,
1707 cpu_buffer->reader_page->read > rb_page_size(reader)))
1710 /* check if we caught up to the tail */
1712 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1716 * Splice the empty reader page into the list around the head.
1717 * Reset the reader page to size zero.
1720 reader = cpu_buffer->head_page;
1721 cpu_buffer->reader_page->list.next = reader->list.next;
1722 cpu_buffer->reader_page->list.prev = reader->list.prev;
1724 local_set(&cpu_buffer->reader_page->write, 0);
1725 local_set(&cpu_buffer->reader_page->page->commit, 0);
1727 /* Make the reader page now replace the head */
1728 reader->list.prev->next = &cpu_buffer->reader_page->list;
1729 reader->list.next->prev = &cpu_buffer->reader_page->list;
1732 * If the tail is on the reader, then we must set the head
1733 * to the inserted page, otherwise we set it one before.
1735 cpu_buffer->head_page = cpu_buffer->reader_page;
1737 if (cpu_buffer->commit_page != reader)
1738 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1740 /* Finally update the reader page to the new head */
1741 cpu_buffer->reader_page = reader;
1742 rb_reset_reader_page(cpu_buffer);
1747 __raw_spin_unlock(&cpu_buffer->lock);
1748 local_irq_restore(flags);
1753 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1755 struct ring_buffer_event *event;
1756 struct buffer_page *reader;
1759 reader = rb_get_reader_page(cpu_buffer);
1761 /* This function should not be called when buffer is empty */
1762 if (RB_WARN_ON(cpu_buffer, !reader))
1765 event = rb_reader_event(cpu_buffer);
1767 if (event->type == RINGBUF_TYPE_DATA)
1768 cpu_buffer->entries--;
1770 rb_update_read_stamp(cpu_buffer, event);
1772 length = rb_event_length(event);
1773 cpu_buffer->reader_page->read += length;
1776 static void rb_advance_iter(struct ring_buffer_iter *iter)
1778 struct ring_buffer *buffer;
1779 struct ring_buffer_per_cpu *cpu_buffer;
1780 struct ring_buffer_event *event;
1783 cpu_buffer = iter->cpu_buffer;
1784 buffer = cpu_buffer->buffer;
1787 * Check if we are at the end of the buffer.
1789 if (iter->head >= rb_page_size(iter->head_page)) {
1790 if (RB_WARN_ON(buffer,
1791 iter->head_page == cpu_buffer->commit_page))
1797 event = rb_iter_head_event(iter);
1799 length = rb_event_length(event);
1802 * This should not be called to advance the header if we are
1803 * at the tail of the buffer.
1805 if (RB_WARN_ON(cpu_buffer,
1806 (iter->head_page == cpu_buffer->commit_page) &&
1807 (iter->head + length > rb_commit_index(cpu_buffer))))
1810 rb_update_iter_read_stamp(iter, event);
1812 iter->head += length;
1814 /* check for end of page padding */
1815 if ((iter->head >= rb_page_size(iter->head_page)) &&
1816 (iter->head_page != cpu_buffer->commit_page))
1817 rb_advance_iter(iter);
1820 static struct ring_buffer_event *
1821 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1823 struct ring_buffer_per_cpu *cpu_buffer;
1824 struct ring_buffer_event *event;
1825 struct buffer_page *reader;
1828 if (!cpu_isset(cpu, buffer->cpumask))
1831 cpu_buffer = buffer->buffers[cpu];
1835 * We repeat when a timestamp is encountered. It is possible
1836 * to get multiple timestamps from an interrupt entering just
1837 * as one timestamp is about to be written. The max times
1838 * that this can happen is the number of nested interrupts we
1839 * can have. Nesting 10 deep of interrupts is clearly
1842 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1845 reader = rb_get_reader_page(cpu_buffer);
1849 event = rb_reader_event(cpu_buffer);
1851 switch (event->type) {
1852 case RINGBUF_TYPE_PADDING:
1853 RB_WARN_ON(cpu_buffer, 1);
1854 rb_advance_reader(cpu_buffer);
1857 case RINGBUF_TYPE_TIME_EXTEND:
1858 /* Internal data, OK to advance */
1859 rb_advance_reader(cpu_buffer);
1862 case RINGBUF_TYPE_TIME_STAMP:
1863 /* FIXME: not implemented */
1864 rb_advance_reader(cpu_buffer);
1867 case RINGBUF_TYPE_DATA:
1869 *ts = cpu_buffer->read_stamp + event->time_delta;
1870 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1881 static struct ring_buffer_event *
1882 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1884 struct ring_buffer *buffer;
1885 struct ring_buffer_per_cpu *cpu_buffer;
1886 struct ring_buffer_event *event;
1889 if (ring_buffer_iter_empty(iter))
1892 cpu_buffer = iter->cpu_buffer;
1893 buffer = cpu_buffer->buffer;
1897 * We repeat when a timestamp is encountered. It is possible
1898 * to get multiple timestamps from an interrupt entering just
1899 * as one timestamp is about to be written. The max times
1900 * that this can happen is the number of nested interrupts we
1901 * can have. Nesting 10 deep of interrupts is clearly
1904 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1907 if (rb_per_cpu_empty(cpu_buffer))
1910 event = rb_iter_head_event(iter);
1912 switch (event->type) {
1913 case RINGBUF_TYPE_PADDING:
1917 case RINGBUF_TYPE_TIME_EXTEND:
1918 /* Internal data, OK to advance */
1919 rb_advance_iter(iter);
1922 case RINGBUF_TYPE_TIME_STAMP:
1923 /* FIXME: not implemented */
1924 rb_advance_iter(iter);
1927 case RINGBUF_TYPE_DATA:
1929 *ts = iter->read_stamp + event->time_delta;
1930 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1942 * ring_buffer_peek - peek at the next event to be read
1943 * @buffer: The ring buffer to read
1944 * @cpu: The cpu to peek at
1945 * @ts: The timestamp counter of this event.
1947 * This will return the event that will be read next, but does
1948 * not consume the data.
1950 struct ring_buffer_event *
1951 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1953 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1954 struct ring_buffer_event *event;
1955 unsigned long flags;
1957 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1958 event = rb_buffer_peek(buffer, cpu, ts);
1959 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1965 * ring_buffer_iter_peek - peek at the next event to be read
1966 * @iter: The ring buffer iterator
1967 * @ts: The timestamp counter of this event.
1969 * This will return the event that will be read next, but does
1970 * not increment the iterator.
1972 struct ring_buffer_event *
1973 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1975 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1976 struct ring_buffer_event *event;
1977 unsigned long flags;
1979 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1980 event = rb_iter_peek(iter, ts);
1981 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1987 * ring_buffer_consume - return an event and consume it
1988 * @buffer: The ring buffer to get the next event from
1990 * Returns the next event in the ring buffer, and that event is consumed.
1991 * Meaning that sequential reads will keep returning a different event,
1992 * and eventually empty the ring buffer if the producer is slower.
1994 struct ring_buffer_event *
1995 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1997 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1998 struct ring_buffer_event *event;
1999 unsigned long flags;
2001 if (!cpu_isset(cpu, buffer->cpumask))
2004 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2006 event = rb_buffer_peek(buffer, cpu, ts);
2010 rb_advance_reader(cpu_buffer);
2013 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
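/*
 * Usage sketch (illustrative, not part of the original file): drain
 * everything currently recorded on one CPU. Each call to
 * ring_buffer_consume() returns the oldest unread event (or NULL when the
 * cpu buffer is empty) and consumes it.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *body = ring_buffer_event_data(event);
		unsigned len = ring_buffer_event_length(event);

		pr_debug("event at %llu: %u byte event at %p\n",
			 (unsigned long long)ts, len, body);
	}
}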
2019 * ring_buffer_read_start - start a non consuming read of the buffer
2020 * @buffer: The ring buffer to read from
2021 * @cpu: The cpu buffer to iterate over
2023 * This starts up an iteration through the buffer. It also disables
2024 * the recording to the buffer until the reading is finished.
2025 * This prevents the reading from being corrupted. This is not
2026 * a consuming read, so a producer is not expected.
2028 * Must be paired with ring_buffer_read_finish.
2030 struct ring_buffer_iter *
2031 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2033 struct ring_buffer_per_cpu *cpu_buffer;
2034 struct ring_buffer_iter *iter;
2035 unsigned long flags;
2037 if (!cpu_isset(cpu, buffer->cpumask))
2040 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2044 cpu_buffer = buffer->buffers[cpu];
2046 iter->cpu_buffer = cpu_buffer;
2048 atomic_inc(&cpu_buffer->record_disabled);
2049 synchronize_sched();
2051 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2052 __raw_spin_lock(&cpu_buffer->lock);
2053 rb_iter_reset(iter);
2054 __raw_spin_unlock(&cpu_buffer->lock);
2055 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2061 * ring_buffer_read_finish - finish reading the iterator of the buffer
2062 * @iter: The iterator retrieved by ring_buffer_read_start
2064 * This re-enables the recording to the buffer, and frees the
2068 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2070 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2072 atomic_dec(&cpu_buffer->record_disabled);
2077 * ring_buffer_read - read the next item in the ring buffer by the iterator
2078 * @iter: The ring buffer iterator
2079 * @ts: The time stamp of the event read.
2081 * This reads the next event in the ring buffer and increments the iterator.
2083 struct ring_buffer_event *
2084 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2086 struct ring_buffer_event *event;
2087 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2088 unsigned long flags;
2090 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2091 event = rb_iter_peek(iter, ts);
2095 rb_advance_iter(iter);
2097 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
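/*
 * Usage sketch (illustrative): a non-consuming pass over one cpu buffer.
 * ring_buffer_read_start() disables recording on that cpu buffer for the
 * duration, and ring_buffer_read_finish() re-enables it.
 */
static void example_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		pr_debug("event at %llu, %u bytes\n",
			 (unsigned long long)ts,
			 ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}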
2103 * ring_buffer_size - return the size of the ring buffer (in bytes)
2104 * @buffer: The ring buffer.
2106 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2108 return BUF_PAGE_SIZE * buffer->pages;
2112 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2114 cpu_buffer->head_page
2115 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2116 local_set(&cpu_buffer->head_page->write, 0);
2117 local_set(&cpu_buffer->head_page->page->commit, 0);
2119 cpu_buffer->head_page->read = 0;
2121 cpu_buffer->tail_page = cpu_buffer->head_page;
2122 cpu_buffer->commit_page = cpu_buffer->head_page;
2124 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2125 local_set(&cpu_buffer->reader_page->write, 0);
2126 local_set(&cpu_buffer->reader_page->page->commit, 0);
2127 cpu_buffer->reader_page->read = 0;
2129 cpu_buffer->overrun = 0;
2130 cpu_buffer->entries = 0;
2134 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2135 * @buffer: The ring buffer to reset a per cpu buffer of
2136 * @cpu: The CPU buffer to be reset
2138 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2140 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2141 unsigned long flags;
2143 if (!cpu_isset(cpu, buffer->cpumask))
2146 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2148 __raw_spin_lock(&cpu_buffer->lock);
2150 rb_reset_cpu(cpu_buffer);
2152 __raw_spin_unlock(&cpu_buffer->lock);
2154 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2158 * ring_buffer_reset - reset a ring buffer
2159 * @buffer: The ring buffer to reset all cpu buffers
2161 void ring_buffer_reset(struct ring_buffer *buffer)
2165 for_each_buffer_cpu(buffer, cpu)
2166 ring_buffer_reset_cpu(buffer, cpu);
2170 * ring_buffer_empty - is the ring buffer empty?
2171 * @buffer: The ring buffer to test
2173 int ring_buffer_empty(struct ring_buffer *buffer)
2175 struct ring_buffer_per_cpu *cpu_buffer;
2178 /* yes this is racy, but if you don't like the race, lock the buffer */
2179 for_each_buffer_cpu(buffer, cpu) {
2180 cpu_buffer = buffer->buffers[cpu];
2181 if (!rb_per_cpu_empty(cpu_buffer))
2188 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2189 * @buffer: The ring buffer
2190 * @cpu: The CPU buffer to test
2192 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2194 struct ring_buffer_per_cpu *cpu_buffer;
2196 if (!cpu_isset(cpu, buffer->cpumask))
2199 cpu_buffer = buffer->buffers[cpu];
2200 return rb_per_cpu_empty(cpu_buffer);
2204 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2205 * @buffer_a: One buffer to swap with
2206 * @buffer_b: The other buffer to swap with
2208 * This function is useful for tracers that want to take a "snapshot"
2209 * of a CPU buffer and has another backup buffer lying around.
2210 * It is expected that the tracer handles the cpu buffer not being
2211 * used at the moment.
2213 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2214 struct ring_buffer *buffer_b, int cpu)
2216 struct ring_buffer_per_cpu *cpu_buffer_a;
2217 struct ring_buffer_per_cpu *cpu_buffer_b;
2219 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2220 !cpu_isset(cpu, buffer_b->cpumask))
2223 /* At least make sure the two buffers are somewhat the same */
2224 if (buffer_a->pages != buffer_b->pages)
2227 cpu_buffer_a = buffer_a->buffers[cpu];
2228 cpu_buffer_b = buffer_b->buffers[cpu];
2231 * We can't do a synchronize_sched here because this
2232 * function can be called in atomic context.
2233 * Normally this will be called from the same CPU as cpu.
2234 * If not it's up to the caller to protect this.
2236 atomic_inc(&cpu_buffer_a->record_disabled);
2237 atomic_inc(&cpu_buffer_b->record_disabled);
2239 buffer_a->buffers[cpu] = cpu_buffer_b;
2240 buffer_b->buffers[cpu] = cpu_buffer_a;
2242 cpu_buffer_b->buffer = buffer_a;
2243 cpu_buffer_a->buffer = buffer_b;
2245 atomic_dec(&cpu_buffer_a->record_disabled);
2246 atomic_dec(&cpu_buffer_b->record_disabled);
2251 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2252 struct buffer_data_page *bpage)
2254 struct ring_buffer_event *event;
2257 __raw_spin_lock(&cpu_buffer->lock);
2258 for (head = 0; head < local_read(&bpage->commit);
2259 head += rb_event_length(event)) {
2261 event = __rb_data_page_index(bpage, head);
2262 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2264 /* Only count data entries */
2265 if (event->type != RINGBUF_TYPE_DATA)
2267 cpu_buffer->entries--;
2269 __raw_spin_unlock(&cpu_buffer->lock);
2273 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2274 * @buffer: the buffer to allocate for.
2276 * This function is used in conjunction with ring_buffer_read_page.
2277 * When reading a full page from the ring buffer, these functions
2278 * can be used to speed up the process. The calling function should
2279 * allocate a few pages first with this function. Then when it
2280 * needs to get pages from the ring buffer, it passes the result
2281 * of this function into ring_buffer_read_page, which will swap
2282 * the page that was allocated, with the read page of the buffer.
2285 * The page allocated, or NULL on error.
2287 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2290 struct buffer_data_page *bpage;
2292 addr = __get_free_page(GFP_KERNEL);
2296 bpage = (void *)addr;
2302 * ring_buffer_free_read_page - free an allocated read page
2303 * @buffer: the buffer the page was allocated for
2304 * @data: the page to free
2306 * Free a page allocated from ring_buffer_alloc_read_page.
2308 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2310 free_page((unsigned long)data);
2314 * ring_buffer_read_page - extract a page from the ring buffer
2315 * @buffer: buffer to extract from
2316 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2317 * @cpu: the cpu of the buffer to extract
2318 * @full: should the extraction only happen when the page is full.
2320 * This function will pull out a page from the ring buffer and consume it.
2321 * @data_page must be the address of the variable that was returned
2322 * from ring_buffer_alloc_read_page. This is because the page might be used
2323 * to swap with a page in the ring buffer.
2326 * rpage = ring_buffer_alloc_read_page(buffer);
2329 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2331 * process_page(rpage);
2333 * When @full is set, the function will not return true unless
2334 * the writer is off the reader page.
2336 * Note: it is up to the calling functions to handle sleeps and wakeups.
2337 * The ring buffer can be used anywhere in the kernel and can not
2338 * blindly call wake_up. The layer that uses the ring buffer must be
2339 * responsible for that.
2342 * 1 if data has been transferred
2343 * 0 if no data has been transferred.
2345 int ring_buffer_read_page(struct ring_buffer *buffer,
2346 void **data_page, int cpu, int full)
2348 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2349 struct ring_buffer_event *event;
2350 struct buffer_data_page *bpage;
2351 unsigned long flags;
2361 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2364 * rb_buffer_peek will get the next ring buffer if
2365 * the current reader page is empty.
2367 event = rb_buffer_peek(buffer, cpu, NULL);
2371 /* check for data */
2372 if (!local_read(&cpu_buffer->reader_page->page->commit))
2375 * If the writer is already off of the read page, then simply
2376 * switch the read page with the given page. Otherwise
2377 * we need to copy the data from the reader to the writer.
2379 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2380 unsigned int read = cpu_buffer->reader_page->read;
2384 /* The writer is still on the reader page, we must copy */
2385 bpage = cpu_buffer->reader_page->page;
2387 cpu_buffer->reader_page->page->data + read,
2388 local_read(&bpage->commit) - read);
2390 /* consume what was read */
2391 cpu_buffer->reader_page->read += read;
2394 /* swap the pages */
2395 rb_init_page(bpage);
2396 bpage = cpu_buffer->reader_page->page;
2397 cpu_buffer->reader_page->page = *data_page;
2398 cpu_buffer->reader_page->read = 0;
2403 /* update the entry counter */
2404 rb_remove_entries(cpu_buffer, bpage);
2406 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
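/*
 * Usage sketch (illustrative, not part of the original file): pull whole
 * pages out of a cpu buffer using the page based interface documented
 * above. example_handle_page() stands in for whatever the caller does
 * with the extracted data.
 */
static void example_read_pages(struct ring_buffer *buffer, int cpu,
			       void (*example_handle_page)(void *data))
{
	void *rpage = ring_buffer_alloc_read_page(buffer);

	if (!rpage)
		return;

	/* keep swapping pages out until the cpu buffer has nothing left */
	while (ring_buffer_read_page(buffer, &rpage, cpu, 0) > 0)
		example_handle_page(rpage);

	ring_buffer_free_read_page(buffer, rpage);
}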
2412 rb_simple_read(struct file *filp, char __user *ubuf,
2413 size_t cnt, loff_t *ppos)
2415 long *p = filp->private_data;
2419 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2420 r = sprintf(buf, "permanently disabled\n");
2422 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2424 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2428 rb_simple_write(struct file *filp, const char __user *ubuf,
2429 size_t cnt, loff_t *ppos)
2431 long *p = filp->private_data;
2436 if (cnt >= sizeof(buf))
2439 if (copy_from_user(&buf, ubuf, cnt))
2444 ret = strict_strtoul(buf, 10, &val);
2449 set_bit(RB_BUFFERS_ON_BIT, p);
2451 clear_bit(RB_BUFFERS_ON_BIT, p);
2458 static struct file_operations rb_simple_fops = {
2459 .open = tracing_open_generic,
2460 .read = rb_simple_read,
2461 .write = rb_simple_write,
2465 static __init int rb_init_debugfs(void)
2467 struct dentry *d_tracer;
2468 struct dentry *entry;
2470 d_tracer = tracing_init_dentry();
2472 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2473 &ring_buffer_flags, &rb_simple_fops);
2475 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2480 fs_initcall(rb_init_debugfs);