4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
25 * The ring buffer is made up of a list of pages. A separate list of pages is
26 * allocated for each CPU. A writer may only write to a buffer that is
27 * associated with the CPU it is currently executing on. A reader may read
28 * from any per cpu buffer.
30 * The reader is special. For each per cpu buffer, the reader has its own
31 * reader page. When a reader has read the entire reader page, this reader
32 * page is swapped with another page in the ring buffer.
34 * Now, as long as the writer is off the reader page, the reader can do
35 * whatever it wants with that page. The writer will never write to that page
36 * again (as long as it is out of the ring buffer).
38 * Here's some silly ASCII art.
41 * |reader| RING BUFFER
43 * +------+ +---+ +---+ +---+
52 * |reader| RING BUFFER
53 * |page |------------------v
54 * +------+ +---+ +---+ +---+
63 * |reader| RING BUFFER
64 * |page |------------------v
65 * +------+ +---+ +---+ +---+
70 * +------------------------------+
74 * |buffer| RING BUFFER
75 * |page |------------------v
76 * +------+ +---+ +---+ +---+
78 * | New +---+ +---+ +---+
81 * +------------------------------+
84 * After we make this swap, the reader can hand this page off to the splice
85 * code and be done with it. It can even allocate a new page if it needs to
86 * and swap that into the ring buffer.
88 * We will be using cmpxchg soon to make all this lockless.
93 * A fast way to enable or disable all ring buffers is to
94 * call tracing_on or tracing_off. Turning off the ring buffers
95 * prevents all ring buffers from being recorded to.
96 * Turning this switch on makes it OK to write to the
97 * ring buffer, if the ring buffer itself is enabled.
99 * There are three layers that must be on in order to write
100 * to the ring buffer.
102 * 1) This global flag must be set.
103 * 2) The ring buffer must be enabled for recording.
104 * 3) The per cpu buffer must be enabled for recording.
106 * In case of an anomaly, this global flag has a bit set that
107 * will permanently disable all ring buffers.
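/*
 * Illustrative sketch (not part of the original file): with a buffer
 * created by ring_buffer_alloc(), a record only succeeds when all three
 * layers above are on.  The calls below are the public switches for
 * layers 1-3; "buffer" and "cpu" are assumed to exist in the caller.
 *
 *	tracing_on();                                // layer 1: global flag
 *	ring_buffer_record_enable(buffer);           // layer 2: whole buffer
 *	ring_buffer_record_enable_cpu(buffer, cpu);  // layer 3: this cpu only
 *
 * Calling tracing_off() alone is enough to make every subsequent
 * reserve/write attempt fail, regardless of layers 2 and 3.
 */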
111 * Global flag to disable all recording to ring buffers
112 * This has two bits: ON, DISABLED
116 * 0 0 : ring buffers are off
117 * 1 0 : ring buffers are on
118 * X 1 : ring buffers are permanently disabled
122 RB_BUFFERS_ON_BIT = 0,
123 RB_BUFFERS_DISABLED_BIT = 1,
127 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
128 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
131 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
133 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
136 * tracing_on - enable all tracing buffers
138 * This function enables all tracing buffers that may have been
139 * disabled with tracing_off.
141 void tracing_on(void)
143 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
145 EXPORT_SYMBOL_GPL(tracing_on);
148 * tracing_off - turn off all tracing buffers
150 * This function stops all tracing buffers from recording data.
151 * It does not disable any overhead the tracers themselves may
152 * be causing. This function simply causes all recording to
153 * the ring buffers to fail.
155 void tracing_off(void)
157 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
159 EXPORT_SYMBOL_GPL(tracing_off);
162 * tracing_off_permanent - permanently disable ring buffers
164 * This function, once called, will disable all ring buffers
167 void tracing_off_permanent(void)
169 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
173 * tracing_is_on - show state of ring buffers enabled
175 int tracing_is_on(void)
177 return ring_buffer_flags == RB_BUFFERS_ON;
179 EXPORT_SYMBOL_GPL(tracing_is_on);
183 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
184 #define RB_ALIGNMENT 4U
185 #define RB_MAX_SMALL_DATA 28
188 RB_LEN_TIME_EXTEND = 8,
189 RB_LEN_TIME_STAMP = 16,
192 /* inline for ring buffer fast paths */
194 rb_event_length(struct ring_buffer_event *event)
198 switch (event->type) {
199 case RINGBUF_TYPE_PADDING:
203 case RINGBUF_TYPE_TIME_EXTEND:
204 return RB_LEN_TIME_EXTEND;
206 case RINGBUF_TYPE_TIME_STAMP:
207 return RB_LEN_TIME_STAMP;
209 case RINGBUF_TYPE_DATA:
211 length = event->len * RB_ALIGNMENT;
213 length = event->array[0];
214 return length + RB_EVNT_HDR_SIZE;
223 * ring_buffer_event_length - return the length of the event
224 * @event: the event to get the length of
226 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
228 unsigned length = rb_event_length(event);
229 if (event->type != RINGBUF_TYPE_DATA)
231 length -= RB_EVNT_HDR_SIZE;
232 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
233 length -= sizeof(event->array[0]);
236 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
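/*
 * Worked example of the length encoding above (a sketch based on the
 * constants and rb_event_length()): a 10 byte payload is padded up to
 * 12 bytes so it is a multiple of RB_ALIGNMENT (4).  Because 12 is at
 * most RB_MAX_SMALL_DATA (28), the size is stored directly in
 * event->len as 12 / 4 = 3 and the data starts at event->array[0].
 * A payload that does not fit instead stores its length in
 * event->array[0], with the data starting at event->array[1];
 * rb_event_length() then reports array[0] + RB_EVNT_HDR_SIZE bytes
 * for the whole event.
 */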
238 /* inline for ring buffer fast paths */
240 rb_event_data(struct ring_buffer_event *event)
242 BUG_ON(event->type != RINGBUF_TYPE_DATA);
243 /* If length is in len field, then array[0] has the data */
245 return (void *)&event->array[0];
246 /* Otherwise length is in array[0] and array[1] has the data */
247 return (void *)&event->array[1];
251 * ring_buffer_event_data - return the data of the event
252 * @event: the event to get the data from
254 void *ring_buffer_event_data(struct ring_buffer_event *event)
256 return rb_event_data(event);
258 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
260 #define for_each_buffer_cpu(buffer, cpu) \
261 for_each_cpu(cpu, buffer->cpumask)
264 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
265 #define TS_DELTA_TEST (~TS_MASK)
267 struct buffer_data_page {
268 u64 time_stamp; /* page time stamp */
269 local_t commit; /* write committed index */
270 unsigned char data[]; /* data of buffer page */
274 local_t write; /* index for next write */
275 unsigned read; /* index for next read */
276 struct list_head list; /* list of free pages */
277 struct buffer_data_page *page; /* Actual data page */
280 static void rb_init_page(struct buffer_data_page *bpage)
282 local_set(&bpage->commit, 0);
286 * ring_buffer_page_len - the size of data on the page.
287 * @page: The page to read
289 * Returns the amount of data on the page, including buffer page header.
291 size_t ring_buffer_page_len(void *page)
293 return local_read(&((struct buffer_data_page *)page)->commit) + BUF_PAGE_HDR_SIZE;
298 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
301 static void free_buffer_page(struct buffer_page *bpage)
303 free_page((unsigned long)bpage->page);
308 * We need to fit the time_stamp delta into 27 bits.
310 static inline int test_time_stamp(u64 delta)
312 if (delta & TS_DELTA_TEST)
317 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
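/*
 * Illustrative arithmetic (assuming TS_SHIFT is the 27 bits mentioned
 * above): a delta fits inline while it is below 2^27 ns, roughly 134 ms
 * with a nanosecond clock.  Anything larger fails test_time_stamp() and
 * forces a RINGBUF_TYPE_TIME_EXTEND event, which splits the delta as:
 *
 *	event->time_delta = delta & TS_MASK;     // low 27 bits
 *	event->array[0]   = delta >> TS_SHIFT;   // remaining high bits
 */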
320 * head_page == tail_page && head == tail then buffer is empty.
322 struct ring_buffer_per_cpu {
324 struct ring_buffer *buffer;
325 spinlock_t reader_lock; /* serialize readers */
327 struct lock_class_key lock_key;
328 struct list_head pages;
329 struct buffer_page *head_page; /* read from head */
330 struct buffer_page *tail_page; /* write to tail */
331 struct buffer_page *commit_page; /* committed pages */
332 struct buffer_page *reader_page;
333 unsigned long overrun;
334 unsigned long entries;
337 atomic_t record_disabled;
344 atomic_t record_disabled;
345 cpumask_var_t cpumask;
349 struct ring_buffer_per_cpu **buffers;
351 #ifdef CONFIG_HOTPLUG_CPU
352 struct notifier_block cpu_notify;
357 struct ring_buffer_iter {
358 struct ring_buffer_per_cpu *cpu_buffer;
360 struct buffer_page *head_page;
364 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
365 #define RB_WARN_ON(buffer, cond) \
367 int _____ret = unlikely(cond); \
369 atomic_inc(&buffer->record_disabled); \
375 /* Up this if you want to test the TIME_EXTENTS and normalization */
376 #define DEBUG_SHIFT 0
378 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
382 preempt_disable_notrace();
383 /* shift to debug/test normalization and TIME_EXTENTS */
384 time = buffer->clock() << DEBUG_SHIFT;
385 preempt_enable_no_resched_notrace();
389 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
391 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
394 /* Just stupid testing the normalize function and deltas */
397 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
400 * check_pages - integrity check of buffer pages
401 * @cpu_buffer: CPU buffer with pages to test
403 * As a safety measure we check to make sure the data pages have not
406 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
408 struct list_head *head = &cpu_buffer->pages;
409 struct buffer_page *bpage, *tmp;
411 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
413 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
416 list_for_each_entry_safe(bpage, tmp, head, list) {
417 if (RB_WARN_ON(cpu_buffer,
418 bpage->list.next->prev != &bpage->list))
420 if (RB_WARN_ON(cpu_buffer,
421 bpage->list.prev->next != &bpage->list))
428 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
431 struct list_head *head = &cpu_buffer->pages;
432 struct buffer_page *bpage, *tmp;
437 for (i = 0; i < nr_pages; i++) {
438 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
439 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
442 list_add(&bpage->list, &pages);
444 addr = __get_free_page(GFP_KERNEL);
447 bpage->page = (void *)addr;
448 rb_init_page(bpage->page);
451 list_splice(&pages, head);
453 rb_check_pages(cpu_buffer);
458 list_for_each_entry_safe(bpage, tmp, &pages, list) {
459 list_del_init(&bpage->list);
460 free_buffer_page(bpage);
465 static struct ring_buffer_per_cpu *
466 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
468 struct ring_buffer_per_cpu *cpu_buffer;
469 struct buffer_page *bpage;
473 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
474 GFP_KERNEL, cpu_to_node(cpu));
478 cpu_buffer->cpu = cpu;
479 cpu_buffer->buffer = buffer;
480 spin_lock_init(&cpu_buffer->reader_lock);
481 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
482 INIT_LIST_HEAD(&cpu_buffer->pages);
484 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
485 GFP_KERNEL, cpu_to_node(cpu));
487 goto fail_free_buffer;
489 cpu_buffer->reader_page = bpage;
490 addr = __get_free_page(GFP_KERNEL);
492 goto fail_free_reader;
493 bpage->page = (void *)addr;
494 rb_init_page(bpage->page);
496 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
498 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
500 goto fail_free_reader;
502 cpu_buffer->head_page
503 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
504 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
509 free_buffer_page(cpu_buffer->reader_page);
516 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
518 struct list_head *head = &cpu_buffer->pages;
519 struct buffer_page *bpage, *tmp;
521 list_del_init(&cpu_buffer->reader_page->list);
522 free_buffer_page(cpu_buffer->reader_page);
524 list_for_each_entry_safe(bpage, tmp, head, list) {
525 list_del_init(&bpage->list);
526 free_buffer_page(bpage);
532 * Causes compile errors if the struct buffer_page gets bigger
533 * than the struct page.
535 extern int ring_buffer_page_too_big(void);
537 #ifdef CONFIG_HOTPLUG_CPU
538 static int rb_cpu_notify(struct notifier_block *self,
539 unsigned long action, void *hcpu);
543 * ring_buffer_alloc - allocate a new ring_buffer
544 * @size: the size in bytes per cpu that is needed.
545 * @flags: attributes to set for the ring buffer.
547 * Currently the only flag that is available is the RB_FL_OVERWRITE
548 * flag. This flag means that the buffer will overwrite old data
549 * when the buffer wraps. If this flag is not set, the buffer will
550 * drop data when the tail hits the head.
552 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
554 struct ring_buffer *buffer;
558 /* Paranoid! Optimizes out when all is well */
559 if (sizeof(struct buffer_page) > sizeof(struct page))
560 ring_buffer_page_too_big();
563 /* keep it in its own cache line */
564 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
569 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
570 goto fail_free_buffer;
572 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
573 buffer->flags = flags;
574 buffer->clock = trace_clock_local;
576 /* need at least two pages */
577 if (buffer->pages == 1)
581 * In case of non-hotplug cpu, if the ring-buffer is allocated
582 * in early initcall, it will not be notified of secondary cpus.
583 * In that case, we need to allocate for all possible cpus.
585 #ifdef CONFIG_HOTPLUG_CPU
587 cpumask_copy(buffer->cpumask, cpu_online_mask);
589 cpumask_copy(buffer->cpumask, cpu_possible_mask);
591 buffer->cpus = nr_cpu_ids;
593 bsize = sizeof(void *) * nr_cpu_ids;
594 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
596 if (!buffer->buffers)
597 goto fail_free_cpumask;
599 for_each_buffer_cpu(buffer, cpu) {
600 buffer->buffers[cpu] =
601 rb_allocate_cpu_buffer(buffer, cpu);
602 if (!buffer->buffers[cpu])
603 goto fail_free_buffers;
606 #ifdef CONFIG_HOTPLUG_CPU
607 buffer->cpu_notify.notifier_call = rb_cpu_notify;
608 buffer->cpu_notify.priority = 0;
609 register_cpu_notifier(&buffer->cpu_notify);
613 mutex_init(&buffer->mutex);
618 for_each_buffer_cpu(buffer, cpu) {
619 if (buffer->buffers[cpu])
620 rb_free_cpu_buffer(buffer->buffers[cpu]);
622 kfree(buffer->buffers);
625 free_cpumask_var(buffer->cpumask);
632 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
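/*
 * Illustrative usage (a sketch, not part of the original file): the size
 * is in bytes per cpu, is rounded up to whole buffer pages, and has a
 * minimum of two pages per cpu.  "rb" is just an example variable name.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(65536, RB_FL_OVERWRITE);  // roughly 64KB per cpu
 *	if (!rb)
 *		return -ENOMEM;
 *	// ... use the buffer ...
 *	ring_buffer_free(rb);
 */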
635 * ring_buffer_free - free a ring buffer.
636 * @buffer: the buffer to free.
639 ring_buffer_free(struct ring_buffer *buffer)
645 #ifdef CONFIG_HOTPLUG_CPU
646 unregister_cpu_notifier(&buffer->cpu_notify);
649 for_each_buffer_cpu(buffer, cpu)
650 rb_free_cpu_buffer(buffer->buffers[cpu]);
654 free_cpumask_var(buffer->cpumask);
658 EXPORT_SYMBOL_GPL(ring_buffer_free);
660 void ring_buffer_set_clock(struct ring_buffer *buffer,
663 buffer->clock = clock;
666 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
669 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
671 struct buffer_page *bpage;
675 atomic_inc(&cpu_buffer->record_disabled);
678 for (i = 0; i < nr_pages; i++) {
679 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
681 p = cpu_buffer->pages.next;
682 bpage = list_entry(p, struct buffer_page, list);
683 list_del_init(&bpage->list);
684 free_buffer_page(bpage);
686 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
689 rb_reset_cpu(cpu_buffer);
691 rb_check_pages(cpu_buffer);
693 atomic_dec(&cpu_buffer->record_disabled);
698 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
699 struct list_head *pages, unsigned nr_pages)
701 struct buffer_page *bpage;
705 atomic_inc(&cpu_buffer->record_disabled);
708 for (i = 0; i < nr_pages; i++) {
709 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
712 bpage = list_entry(p, struct buffer_page, list);
713 list_del_init(&bpage->list);
714 list_add_tail(&bpage->list, &cpu_buffer->pages);
716 rb_reset_cpu(cpu_buffer);
718 rb_check_pages(cpu_buffer);
720 atomic_dec(&cpu_buffer->record_disabled);
724 * ring_buffer_resize - resize the ring buffer
725 * @buffer: the buffer to resize.
726 * @size: the new size.
728 * The tracer is responsible for making sure that the buffer is
729 * not being used while changing the size.
730 * Note: We may be able to change the above requirement by using
731 * RCU synchronizations.
733 * Minimum size is 2 * BUF_PAGE_SIZE.
735 * Returns -1 on failure.
737 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
739 struct ring_buffer_per_cpu *cpu_buffer;
740 unsigned nr_pages, rm_pages, new_pages;
741 struct buffer_page *bpage, *tmp;
742 unsigned long buffer_size;
748 * Always succeed at resizing a non-existent buffer:
753 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
754 size *= BUF_PAGE_SIZE;
755 buffer_size = buffer->pages * BUF_PAGE_SIZE;
757 /* we need a minimum of two pages */
758 if (size < BUF_PAGE_SIZE * 2)
759 size = BUF_PAGE_SIZE * 2;
761 if (size == buffer_size)
764 mutex_lock(&buffer->mutex);
767 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
769 if (size < buffer_size) {
771 /* easy case, just free pages */
772 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
775 rm_pages = buffer->pages - nr_pages;
777 for_each_buffer_cpu(buffer, cpu) {
778 cpu_buffer = buffer->buffers[cpu];
779 rb_remove_pages(cpu_buffer, rm_pages);
785 * This is a bit more difficult. We only want to add pages
786 * when we can allocate enough for all CPUs. We do this
787 * by allocating all the pages and storing them on a local
788 * linked list. If we succeed in our allocation, then we
789 * add these pages to the cpu_buffers. Otherwise we just free
790 * them all and return -ENOMEM.
792 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
795 new_pages = nr_pages - buffer->pages;
797 for_each_buffer_cpu(buffer, cpu) {
798 for (i = 0; i < new_pages; i++) {
799 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
801 GFP_KERNEL, cpu_to_node(cpu));
804 list_add(&bpage->list, &pages);
805 addr = __get_free_page(GFP_KERNEL);
808 bpage->page = (void *)addr;
809 rb_init_page(bpage->page);
813 for_each_buffer_cpu(buffer, cpu) {
814 cpu_buffer = buffer->buffers[cpu];
815 rb_insert_pages(cpu_buffer, &pages, new_pages);
818 if (RB_WARN_ON(buffer, !list_empty(&pages)))
822 buffer->pages = nr_pages;
824 mutex_unlock(&buffer->mutex);
829 list_for_each_entry_safe(bpage, tmp, &pages, list) {
830 list_del_init(&bpage->list);
831 free_buffer_page(bpage);
834 mutex_unlock(&buffer->mutex);
838 * Something went totally wrong, and we are too paranoid
839 * to even clean up the mess.
843 mutex_unlock(&buffer->mutex);
846 EXPORT_SYMBOL_GPL(ring_buffer_resize);
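/*
 * Illustrative usage (a sketch): as noted above, the caller must make
 * sure the buffer is not being written to while it is resized.
 *
 *	ring_buffer_record_disable(rb);
 *	synchronize_sched();
 *	if (ring_buffer_resize(rb, 2 * 1024 * 1024) < 0)   // new per-cpu size
 *		printk(KERN_WARNING "ring buffer resize failed\n");
 *	ring_buffer_record_enable(rb);
 */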
848 static inline int rb_null_event(struct ring_buffer_event *event)
850 return event->type == RINGBUF_TYPE_PADDING;
854 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
856 return bpage->data + index;
859 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
861 return bpage->page->data + index;
864 static inline struct ring_buffer_event *
865 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
867 return __rb_page_index(cpu_buffer->reader_page,
868 cpu_buffer->reader_page->read);
871 static inline struct ring_buffer_event *
872 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
874 return __rb_page_index(cpu_buffer->head_page,
875 cpu_buffer->head_page->read);
878 static inline struct ring_buffer_event *
879 rb_iter_head_event(struct ring_buffer_iter *iter)
881 return __rb_page_index(iter->head_page, iter->head);
884 static inline unsigned rb_page_write(struct buffer_page *bpage)
886 return local_read(&bpage->write);
889 static inline unsigned rb_page_commit(struct buffer_page *bpage)
891 return local_read(&bpage->page->commit);
894 /* Size is determined by what has been committed */
895 static inline unsigned rb_page_size(struct buffer_page *bpage)
897 return rb_page_commit(bpage);
900 static inline unsigned
901 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
903 return rb_page_commit(cpu_buffer->commit_page);
906 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
908 return rb_page_commit(cpu_buffer->head_page);
912 * When the tail hits the head and the buffer is in overwrite mode,
913 * the head jumps to the next page and all content on the previous
914 * page is discarded. But before doing so, we update the overrun
915 * variable of the buffer.
917 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
919 struct ring_buffer_event *event;
922 for (head = 0; head < rb_head_size(cpu_buffer);
923 head += rb_event_length(event)) {
925 event = __rb_page_index(cpu_buffer->head_page, head);
926 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
928 /* Only count data entries */
929 if (event->type != RINGBUF_TYPE_DATA)
931 cpu_buffer->overrun++;
932 cpu_buffer->entries--;
936 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
937 struct buffer_page **bpage)
939 struct list_head *p = (*bpage)->list.next;
941 if (p == &cpu_buffer->pages)
944 *bpage = list_entry(p, struct buffer_page, list);
947 static inline unsigned
948 rb_event_index(struct ring_buffer_event *event)
950 unsigned long addr = (unsigned long)event;
952 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
956 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
957 struct ring_buffer_event *event)
959 unsigned long addr = (unsigned long)event;
962 index = rb_event_index(event);
965 return cpu_buffer->commit_page->page == (void *)addr &&
966 rb_commit_index(cpu_buffer) == index;
970 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
971 struct ring_buffer_event *event)
973 unsigned long addr = (unsigned long)event;
976 index = rb_event_index(event);
979 while (cpu_buffer->commit_page->page != (void *)addr) {
980 if (RB_WARN_ON(cpu_buffer,
981 cpu_buffer->commit_page == cpu_buffer->tail_page))
983 cpu_buffer->commit_page->page->commit =
984 cpu_buffer->commit_page->write;
985 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
986 cpu_buffer->write_stamp =
987 cpu_buffer->commit_page->page->time_stamp;
990 /* Now set the commit to the event's index */
991 local_set(&cpu_buffer->commit_page->page->commit, index);
995 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
998 * We only race with interrupts and NMIs on this CPU.
999 * If we own the commit event, then we can commit
1000 * all others that interrupted us, since the interruptions
1001 * nest like a stack (they finish before they come
1002 * back to us). This allows us to do a simple loop to
1003 * assign the commit to the tail.
1006 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1007 cpu_buffer->commit_page->page->commit =
1008 cpu_buffer->commit_page->write;
1009 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1010 cpu_buffer->write_stamp =
1011 cpu_buffer->commit_page->page->time_stamp;
1012 /* add barrier to keep gcc from optimizing too much */
1015 while (rb_commit_index(cpu_buffer) !=
1016 rb_page_write(cpu_buffer->commit_page)) {
1017 cpu_buffer->commit_page->page->commit =
1018 cpu_buffer->commit_page->write;
1022 /* again, keep gcc from optimizing */
1026 * If an interrupt came in just after the first while loop
1027 * and pushed the tail page forward, we will be left with
1028 * a dangling commit that will never go forward.
1030 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1034 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1036 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1037 cpu_buffer->reader_page->read = 0;
1040 static void rb_inc_iter(struct ring_buffer_iter *iter)
1042 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1045 * The iterator could be on the reader page (it starts there).
1046 * But the head could have moved, since the reader was
1047 * found. Check for this case and assign the iterator
1048 * to the head page instead of next.
1050 if (iter->head_page == cpu_buffer->reader_page)
1051 iter->head_page = cpu_buffer->head_page;
1053 rb_inc_page(cpu_buffer, &iter->head_page);
1055 iter->read_stamp = iter->head_page->page->time_stamp;
1060 * ring_buffer_update_event - update event type and data
1061 * @event: the event to update
1062 * @type: the type of event
1063 * @length: the size of the event field in the ring buffer
1065 * Update the type and data fields of the event. The length
1066 * is the actual size that is written to the ring buffer,
1067 * and with this, we can determine what to place into the
1071 rb_update_event(struct ring_buffer_event *event,
1072 unsigned type, unsigned length)
1078 case RINGBUF_TYPE_PADDING:
1081 case RINGBUF_TYPE_TIME_EXTEND:
1082 event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
1085 case RINGBUF_TYPE_TIME_STAMP:
1086 event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
1089 case RINGBUF_TYPE_DATA:
1090 length -= RB_EVNT_HDR_SIZE;
1091 if (length > RB_MAX_SMALL_DATA) {
1093 event->array[0] = length;
1095 event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1102 static unsigned rb_calculate_event_length(unsigned length)
1104 struct ring_buffer_event event; /* Used only for sizeof array */
1106 /* a zero length can cause confusion */
1110 if (length > RB_MAX_SMALL_DATA)
1111 length += sizeof(event.array[0]);
1113 length += RB_EVNT_HDR_SIZE;
1114 length = ALIGN(length, RB_ALIGNMENT);
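/*
 * Worked example for the calculation above (a sketch): a request for 5
 * bytes of data stays small, so it becomes 5 + RB_EVNT_HDR_SIZE = 9 and
 * is aligned up to 12 bytes.  A request for 40 bytes exceeds
 * RB_MAX_SMALL_DATA, so an extra array[0] word is reserved first:
 * 40 + 4 + RB_EVNT_HDR_SIZE = 48, already a multiple of RB_ALIGNMENT.
 */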
1119 static struct ring_buffer_event *
1120 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1121 unsigned type, unsigned long length, u64 *ts)
1123 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
1124 unsigned long tail, write;
1125 struct ring_buffer *buffer = cpu_buffer->buffer;
1126 struct ring_buffer_event *event;
1127 unsigned long flags;
1128 bool lock_taken = false;
1130 commit_page = cpu_buffer->commit_page;
1131 /* we just need to protect against interrupts */
1133 tail_page = cpu_buffer->tail_page;
1134 write = local_add_return(length, &tail_page->write);
1135 tail = write - length;
1137 /* See if we shot past the end of this buffer page */
1138 if (write > BUF_PAGE_SIZE) {
1139 struct buffer_page *next_page = tail_page;
1141 local_irq_save(flags);
1143 * Since the write to the buffer is still not
1144 * fully lockless, we must be careful with NMIs.
1145 * The locks in the writers are taken when a write
1146 * crosses to a new page. The locks protect against
1147 * races with the readers (this will soon be fixed
1148 * with a lockless solution).
1150 * Because we can not protect against NMIs, and we
1151 * want to keep traces reentrant, we need to manage
1152 * what happens when we are in an NMI.
1154 * NMIs can happen after we take the lock.
1155 * If we are in an NMI, only take the lock
1156 * if it is not already taken. Otherwise
1159 if (unlikely(in_nmi())) {
1160 if (!__raw_spin_trylock(&cpu_buffer->lock))
1163 __raw_spin_lock(&cpu_buffer->lock);
1167 rb_inc_page(cpu_buffer, &next_page);
1169 head_page = cpu_buffer->head_page;
1170 reader_page = cpu_buffer->reader_page;
1172 /* we grabbed the lock before incrementing */
1173 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1177 * If for some reason, we had an interrupt storm that made
1178 * it all the way around the buffer, bail, and warn
1181 if (unlikely(next_page == commit_page)) {
1186 if (next_page == head_page) {
1187 if (!(buffer->flags & RB_FL_OVERWRITE))
1190 /* tail_page has not moved yet? */
1191 if (tail_page == cpu_buffer->tail_page) {
1192 /* count overflows */
1193 rb_update_overflow(cpu_buffer);
1195 rb_inc_page(cpu_buffer, &head_page);
1196 cpu_buffer->head_page = head_page;
1197 cpu_buffer->head_page->read = 0;
1202 * If the tail page is still the same as what we think
1203 * it is, then it is up to us to update the tail
1206 if (tail_page == cpu_buffer->tail_page) {
1207 local_set(&next_page->write, 0);
1208 local_set(&next_page->page->commit, 0);
1209 cpu_buffer->tail_page = next_page;
1211 /* reread the time stamp */
1212 *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
1213 cpu_buffer->tail_page->page->time_stamp = *ts;
1217 * The actual tail page has moved forward.
1219 if (tail < BUF_PAGE_SIZE) {
1220 /* Mark the rest of the page with padding */
1221 event = __rb_page_index(tail_page, tail);
1222 event->type = RINGBUF_TYPE_PADDING;
1225 if (tail <= BUF_PAGE_SIZE)
1226 /* Set the write back to the previous setting */
1227 local_set(&tail_page->write, tail);
1230 * If this was a commit entry that failed,
1231 * increment that too
1233 if (tail_page == cpu_buffer->commit_page &&
1234 tail == rb_commit_index(cpu_buffer)) {
1235 rb_set_commit_to_write(cpu_buffer);
1238 __raw_spin_unlock(&cpu_buffer->lock);
1239 local_irq_restore(flags);
1241 /* fail and let the caller try again */
1242 return ERR_PTR(-EAGAIN);
1245 /* We reserved something on the buffer */
1247 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1250 event = __rb_page_index(tail_page, tail);
1251 rb_update_event(event, type, length);
1254 * If this is a commit and the tail is zero, then update
1255 * this page's time stamp.
1257 if (!tail && rb_is_commit(cpu_buffer, event))
1258 cpu_buffer->commit_page->page->time_stamp = *ts;
1264 if (tail <= BUF_PAGE_SIZE)
1265 local_set(&tail_page->write, tail);
1267 if (likely(lock_taken))
1268 __raw_spin_unlock(&cpu_buffer->lock);
1269 local_irq_restore(flags);
1274 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1275 u64 *ts, u64 *delta)
1277 struct ring_buffer_event *event;
1281 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1282 printk(KERN_WARNING "Delta way too big! %llu"
1283 " ts=%llu write stamp = %llu\n",
1284 (unsigned long long)*delta,
1285 (unsigned long long)*ts,
1286 (unsigned long long)cpu_buffer->write_stamp);
1291 * The delta is too big, we need to add a
1294 event = __rb_reserve_next(cpu_buffer,
1295 RINGBUF_TYPE_TIME_EXTEND,
1301 if (PTR_ERR(event) == -EAGAIN)
1304 /* Only a committed time event can update the write stamp */
1305 if (rb_is_commit(cpu_buffer, event)) {
1307 * If this is the first on the page, then we need to
1308 * update the page itself, and just put in a zero.
1310 if (rb_event_index(event)) {
1311 event->time_delta = *delta & TS_MASK;
1312 event->array[0] = *delta >> TS_SHIFT;
1314 cpu_buffer->commit_page->page->time_stamp = *ts;
1315 event->time_delta = 0;
1316 event->array[0] = 0;
1318 cpu_buffer->write_stamp = *ts;
1319 /* let the caller know this was the commit */
1322 /* Darn, this is just wasted space */
1323 event->time_delta = 0;
1324 event->array[0] = 0;
1333 static struct ring_buffer_event *
1334 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1335 unsigned type, unsigned long length)
1337 struct ring_buffer_event *event;
1344 * We allow for interrupts to reenter here and do a trace.
1345 * If one does, it will cause this original code to loop
1346 * back here. Even with heavy interrupts happening, this
1347 * should only happen a few times in a row. If this happens
1348 * 1000 times in a row, there must be either an interrupt
1349 * storm or we have something buggy.
1352 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1355 ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1358 * Only the first commit can update the timestamp.
1359 * Yes there is a race here. If an interrupt comes in
1360 * just after the conditional and it traces too, then it
1361 * will also check the deltas. More than one timestamp may
1362 * also be made. But only the entry that did the actual
1363 * commit will be something other than zero.
1365 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1366 rb_page_write(cpu_buffer->tail_page) ==
1367 rb_commit_index(cpu_buffer)) {
1369 delta = ts - cpu_buffer->write_stamp;
1371 /* make sure this delta is calculated here */
1374 /* Did the write stamp get updated already? */
1375 if (unlikely(ts < cpu_buffer->write_stamp))
1378 if (test_time_stamp(delta)) {
1380 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1382 if (commit == -EBUSY)
1385 if (commit == -EAGAIN)
1388 RB_WARN_ON(cpu_buffer, commit < 0);
1391 /* Non commits have zero deltas */
1394 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1395 if (PTR_ERR(event) == -EAGAIN)
1399 if (unlikely(commit))
1401 * Ouch! We needed a timestamp and it was committed. But
1402 * we didn't get our event reserved.
1404 rb_set_commit_to_write(cpu_buffer);
1409 * If the timestamp was committed, make the commit our entry
1410 * now so that we will update it when needed.
1413 rb_set_commit_event(cpu_buffer, event);
1414 else if (!rb_is_commit(cpu_buffer, event))
1417 event->time_delta = delta;
1422 static DEFINE_PER_CPU(int, rb_need_resched);
1425 * ring_buffer_lock_reserve - reserve a part of the buffer
1426 * @buffer: the ring buffer to reserve from
1427 * @length: the length of the data to reserve (excluding event header)
1429 * Returns a reserved event on the ring buffer to copy directly to.
1430 * The user of this interface will need to get the body to write into
1431 * and can use the ring_buffer_event_data() interface.
1433 * The length is the length of the data needed, not the event length
1434 * which also includes the event header.
1436 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1437 * If NULL is returned, then nothing has been allocated or locked.
1439 struct ring_buffer_event *
1440 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1442 struct ring_buffer_per_cpu *cpu_buffer;
1443 struct ring_buffer_event *event;
1446 if (ring_buffer_flags != RB_BUFFERS_ON)
1449 if (atomic_read(&buffer->record_disabled))
1452 /* If we are tracing schedule, we don't want to recurse */
1453 resched = ftrace_preempt_disable();
1455 cpu = raw_smp_processor_id();
1457 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1460 cpu_buffer = buffer->buffers[cpu];
1462 if (atomic_read(&cpu_buffer->record_disabled))
1465 length = rb_calculate_event_length(length);
1466 if (length > BUF_PAGE_SIZE)
1469 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1474 * Need to store resched state on this cpu.
1475 * Only the first needs to.
1478 if (preempt_count() == 1)
1479 per_cpu(rb_need_resched, cpu) = resched;
1484 ftrace_preempt_enable(resched);
1487 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1489 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1490 struct ring_buffer_event *event)
1492 cpu_buffer->entries++;
1494 /* Only process further if we own the commit */
1495 if (!rb_is_commit(cpu_buffer, event))
1498 cpu_buffer->write_stamp += event->time_delta;
1500 rb_set_commit_to_write(cpu_buffer);
1504 * ring_buffer_unlock_commit - commit a reserved event
1505 * @buffer: The buffer to commit to
1506 * @event: The event pointer to commit.
1508 * This commits the data to the ring buffer, and releases any locks held.
1510 * Must be paired with ring_buffer_lock_reserve.
1512 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1513 struct ring_buffer_event *event)
1515 struct ring_buffer_per_cpu *cpu_buffer;
1516 int cpu = raw_smp_processor_id();
1518 cpu_buffer = buffer->buffers[cpu];
1520 rb_commit(cpu_buffer, event);
1523 * Only the last preempt count needs to restore preemption.
1525 if (preempt_count() == 1)
1526 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1528 preempt_enable_no_resched_notrace();
1532 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
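/*
 * Illustrative usage of the reserve/commit pair (a sketch; "rb" and
 * "struct my_payload" are made-up example names):
 *
 *	struct ring_buffer_event *event;
 *	struct my_payload *p;
 *
 *	event = ring_buffer_lock_reserve(rb, sizeof(*p));
 *	if (!event)
 *		return;                 // buffer off, disabled or reserve failed
 *	p = ring_buffer_event_data(event);
 *	p->value = 42;                  // fill in the reserved body
 *	ring_buffer_unlock_commit(rb, event);
 */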
1535 * ring_buffer_write - write data to the buffer without reserving
1536 * @buffer: The ring buffer to write to.
1537 * @length: The length of the data being written (excluding the event header)
1538 * @data: The data to write to the buffer.
1540 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1541 * one function. If you already have the data to write to the buffer, it
1542 * may be easier to simply call this function.
1544 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1545 * and not the length of the event which would hold the header.
1547 int ring_buffer_write(struct ring_buffer *buffer,
1548 unsigned long length,
1551 struct ring_buffer_per_cpu *cpu_buffer;
1552 struct ring_buffer_event *event;
1553 unsigned long event_length;
1558 if (ring_buffer_flags != RB_BUFFERS_ON)
1561 if (atomic_read(&buffer->record_disabled))
1564 resched = ftrace_preempt_disable();
1566 cpu = raw_smp_processor_id();
1568 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1571 cpu_buffer = buffer->buffers[cpu];
1573 if (atomic_read(&cpu_buffer->record_disabled))
1576 event_length = rb_calculate_event_length(length);
1577 event = rb_reserve_next_event(cpu_buffer,
1578 RINGBUF_TYPE_DATA, event_length);
1582 body = rb_event_data(event);
1584 memcpy(body, data, length);
1586 rb_commit(cpu_buffer, event);
1590 ftrace_preempt_enable(resched);
1594 EXPORT_SYMBOL_GPL(ring_buffer_write);
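/*
 * Illustrative usage (a sketch): when the data already exists, a single
 * call replaces the reserve/commit pair above.  A non-zero return is
 * assumed here to mean the write did not happen.
 *
 *	char msg[] = "hello";
 *
 *	if (ring_buffer_write(rb, sizeof(msg), msg))
 *		printk(KERN_WARNING "ring buffer write dropped\n");
 */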
1596 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1598 struct buffer_page *reader = cpu_buffer->reader_page;
1599 struct buffer_page *head = cpu_buffer->head_page;
1600 struct buffer_page *commit = cpu_buffer->commit_page;
1602 return reader->read == rb_page_commit(reader) &&
1603 (commit == reader ||
1605 head->read == rb_page_commit(commit)));
1609 * ring_buffer_record_disable - stop all writes into the buffer
1610 * @buffer: The ring buffer to stop writes to.
1612 * This prevents all writes to the buffer. Any attempt to write
1613 * to the buffer after this will fail and return NULL.
1615 * The caller should call synchronize_sched() after this.
1617 void ring_buffer_record_disable(struct ring_buffer *buffer)
1619 atomic_inc(&buffer->record_disabled);
1621 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1624 * ring_buffer_record_enable - enable writes to the buffer
1625 * @buffer: The ring buffer to enable writes
1627 * Note, multiple disables will need the same number of enables
1628 * to truly enable the writing (much like preempt_disable).
1630 void ring_buffer_record_enable(struct ring_buffer *buffer)
1632 atomic_dec(&buffer->record_disabled);
1634 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
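/*
 * Illustrative sketch of the disable/enable pairing: disables nest, so
 * each disable needs a matching enable, and a synchronize_sched() after
 * disabling lets in-flight writers on other cpus finish first.
 *
 *	ring_buffer_record_disable(rb);
 *	synchronize_sched();
 *	// ... safe to inspect the buffer contents here ...
 *	ring_buffer_record_enable(rb);
 */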
1637 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1638 * @buffer: The ring buffer to stop writes to.
1639 * @cpu: The CPU buffer to stop
1641 * This prevents all writes to the buffer. Any attempt to write
1642 * to the buffer after this will fail and return NULL.
1644 * The caller should call synchronize_sched() after this.
1646 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1648 struct ring_buffer_per_cpu *cpu_buffer;
1650 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1653 cpu_buffer = buffer->buffers[cpu];
1654 atomic_inc(&cpu_buffer->record_disabled);
1656 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1659 * ring_buffer_record_enable_cpu - enable writes to the buffer
1660 * @buffer: The ring buffer to enable writes
1661 * @cpu: The CPU to enable.
1663 * Note, multiple disables will need the same number of enables
1664 * to truly enable the writing (much like preempt_disable).
1666 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1668 struct ring_buffer_per_cpu *cpu_buffer;
1670 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1673 cpu_buffer = buffer->buffers[cpu];
1674 atomic_dec(&cpu_buffer->record_disabled);
1676 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1679 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1680 * @buffer: The ring buffer
1681 * @cpu: The per CPU buffer to get the entries from.
1683 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1685 struct ring_buffer_per_cpu *cpu_buffer;
1688 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1691 cpu_buffer = buffer->buffers[cpu];
1692 ret = cpu_buffer->entries;
1696 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1699 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1700 * @buffer: The ring buffer
1701 * @cpu: The per CPU buffer to get the number of overruns from
1703 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1705 struct ring_buffer_per_cpu *cpu_buffer;
1708 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1711 cpu_buffer = buffer->buffers[cpu];
1712 ret = cpu_buffer->overrun;
1716 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1719 * ring_buffer_entries - get the number of entries in a buffer
1720 * @buffer: The ring buffer
1722 * Returns the total number of entries in the ring buffer
1725 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1727 struct ring_buffer_per_cpu *cpu_buffer;
1728 unsigned long entries = 0;
1731 /* if you care about this being correct, lock the buffer */
1732 for_each_buffer_cpu(buffer, cpu) {
1733 cpu_buffer = buffer->buffers[cpu];
1734 entries += cpu_buffer->entries;
1739 EXPORT_SYMBOL_GPL(ring_buffer_entries);
1742 * ring_buffer_overruns - get the total number of overruns in the buffer
1743 * @buffer: The ring buffer
1745 * Returns the total number of overruns in the ring buffer
1748 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1750 struct ring_buffer_per_cpu *cpu_buffer;
1751 unsigned long overruns = 0;
1754 /* if you care about this being correct, lock the buffer */
1755 for_each_buffer_cpu(buffer, cpu) {
1756 cpu_buffer = buffer->buffers[cpu];
1757 overruns += cpu_buffer->overrun;
1762 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1764 static void rb_iter_reset(struct ring_buffer_iter *iter)
1766 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1768 /* Iterator usage is expected to have record disabled */
1769 if (list_empty(&cpu_buffer->reader_page->list)) {
1770 iter->head_page = cpu_buffer->head_page;
1771 iter->head = cpu_buffer->head_page->read;
1773 iter->head_page = cpu_buffer->reader_page;
1774 iter->head = cpu_buffer->reader_page->read;
1777 iter->read_stamp = cpu_buffer->read_stamp;
1779 iter->read_stamp = iter->head_page->page->time_stamp;
1783 * ring_buffer_iter_reset - reset an iterator
1784 * @iter: The iterator to reset
1786 * Resets the iterator, so that it will start from the beginning
1789 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1791 struct ring_buffer_per_cpu *cpu_buffer;
1792 unsigned long flags;
1797 cpu_buffer = iter->cpu_buffer;
1799 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1800 rb_iter_reset(iter);
1801 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1803 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1806 * ring_buffer_iter_empty - check if an iterator has no more to read
1807 * @iter: The iterator to check
1809 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1811 struct ring_buffer_per_cpu *cpu_buffer;
1813 cpu_buffer = iter->cpu_buffer;
1815 return iter->head_page == cpu_buffer->commit_page &&
1816 iter->head == rb_commit_index(cpu_buffer);
1818 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1821 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1822 struct ring_buffer_event *event)
1826 switch (event->type) {
1827 case RINGBUF_TYPE_PADDING:
1830 case RINGBUF_TYPE_TIME_EXTEND:
1831 delta = event->array[0];
1833 delta += event->time_delta;
1834 cpu_buffer->read_stamp += delta;
1837 case RINGBUF_TYPE_TIME_STAMP:
1838 /* FIXME: not implemented */
1841 case RINGBUF_TYPE_DATA:
1842 cpu_buffer->read_stamp += event->time_delta;
1852 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1853 struct ring_buffer_event *event)
1857 switch (event->type) {
1858 case RINGBUF_TYPE_PADDING:
1861 case RINGBUF_TYPE_TIME_EXTEND:
1862 delta = event->array[0];
1864 delta += event->time_delta;
1865 iter->read_stamp += delta;
1868 case RINGBUF_TYPE_TIME_STAMP:
1869 /* FIXME: not implemented */
1872 case RINGBUF_TYPE_DATA:
1873 iter->read_stamp += event->time_delta;
1882 static struct buffer_page *
1883 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1885 struct buffer_page *reader = NULL;
1886 unsigned long flags;
1889 local_irq_save(flags);
1890 __raw_spin_lock(&cpu_buffer->lock);
1894 * This should normally only loop twice. But because the
1895 * start of the reader inserts an empty page, it causes
1896 * a case where we will loop three times. There should be no
1897 * reason to loop four times (that I know of).
1899 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1904 reader = cpu_buffer->reader_page;
1906 /* If there's more to read, return this page */
1907 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1910 /* Never should we have an index greater than the size */
1911 if (RB_WARN_ON(cpu_buffer,
1912 cpu_buffer->reader_page->read > rb_page_size(reader)))
1915 /* check if we caught up to the tail */
1917 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1921 * Splice the empty reader page into the list around the head.
1922 * Reset the reader page to size zero.
1925 reader = cpu_buffer->head_page;
1926 cpu_buffer->reader_page->list.next = reader->list.next;
1927 cpu_buffer->reader_page->list.prev = reader->list.prev;
1929 local_set(&cpu_buffer->reader_page->write, 0);
1930 local_set(&cpu_buffer->reader_page->page->commit, 0);
1932 /* Make the reader page now replace the head */
1933 reader->list.prev->next = &cpu_buffer->reader_page->list;
1934 reader->list.next->prev = &cpu_buffer->reader_page->list;
1937 * If the tail is on the reader, then we must set the head
1938 * to the inserted page, otherwise we set it one before.
1940 cpu_buffer->head_page = cpu_buffer->reader_page;
1942 if (cpu_buffer->commit_page != reader)
1943 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1945 /* Finally update the reader page to the new head */
1946 cpu_buffer->reader_page = reader;
1947 rb_reset_reader_page(cpu_buffer);
1952 __raw_spin_unlock(&cpu_buffer->lock);
1953 local_irq_restore(flags);
1958 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1960 struct ring_buffer_event *event;
1961 struct buffer_page *reader;
1964 reader = rb_get_reader_page(cpu_buffer);
1966 /* This function should not be called when buffer is empty */
1967 if (RB_WARN_ON(cpu_buffer, !reader))
1970 event = rb_reader_event(cpu_buffer);
1972 if (event->type == RINGBUF_TYPE_DATA)
1973 cpu_buffer->entries--;
1975 rb_update_read_stamp(cpu_buffer, event);
1977 length = rb_event_length(event);
1978 cpu_buffer->reader_page->read += length;
1981 static void rb_advance_iter(struct ring_buffer_iter *iter)
1983 struct ring_buffer *buffer;
1984 struct ring_buffer_per_cpu *cpu_buffer;
1985 struct ring_buffer_event *event;
1988 cpu_buffer = iter->cpu_buffer;
1989 buffer = cpu_buffer->buffer;
1992 * Check if we are at the end of the buffer.
1994 if (iter->head >= rb_page_size(iter->head_page)) {
1995 if (RB_WARN_ON(buffer,
1996 iter->head_page == cpu_buffer->commit_page))
2002 event = rb_iter_head_event(iter);
2004 length = rb_event_length(event);
2007 * This should not be called to advance the header if we are
2008 * at the tail of the buffer.
2010 if (RB_WARN_ON(cpu_buffer,
2011 (iter->head_page == cpu_buffer->commit_page) &&
2012 (iter->head + length > rb_commit_index(cpu_buffer))))
2015 rb_update_iter_read_stamp(iter, event);
2017 iter->head += length;
2019 /* check for end of page padding */
2020 if ((iter->head >= rb_page_size(iter->head_page)) &&
2021 (iter->head_page != cpu_buffer->commit_page))
2022 rb_advance_iter(iter);
2025 static struct ring_buffer_event *
2026 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2028 struct ring_buffer_per_cpu *cpu_buffer;
2029 struct ring_buffer_event *event;
2030 struct buffer_page *reader;
2033 cpu_buffer = buffer->buffers[cpu];
2037 * We repeat when a timestamp is encountered. It is possible
2038 * to get multiple timestamps from an interrupt entering just
2039 * as one timestamp is about to be written. The max times
2040 * that this can happen is the number of nested interrupts we
2041 * can have. Nesting 10 deep of interrupts is clearly
2044 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2047 reader = rb_get_reader_page(cpu_buffer);
2051 event = rb_reader_event(cpu_buffer);
2053 switch (event->type) {
2054 case RINGBUF_TYPE_PADDING:
2055 RB_WARN_ON(cpu_buffer, 1);
2056 rb_advance_reader(cpu_buffer);
2059 case RINGBUF_TYPE_TIME_EXTEND:
2060 /* Internal data, OK to advance */
2061 rb_advance_reader(cpu_buffer);
2064 case RINGBUF_TYPE_TIME_STAMP:
2065 /* FIXME: not implemented */
2066 rb_advance_reader(cpu_buffer);
2069 case RINGBUF_TYPE_DATA:
2071 *ts = cpu_buffer->read_stamp + event->time_delta;
2072 ring_buffer_normalize_time_stamp(buffer,
2073 cpu_buffer->cpu, ts);
2083 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2085 static struct ring_buffer_event *
2086 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2088 struct ring_buffer *buffer;
2089 struct ring_buffer_per_cpu *cpu_buffer;
2090 struct ring_buffer_event *event;
2093 if (ring_buffer_iter_empty(iter))
2096 cpu_buffer = iter->cpu_buffer;
2097 buffer = cpu_buffer->buffer;
2101 * We repeat when a timestamp is encountered. It is possible
2102 * to get multiple timestamps from an interrupt entering just
2103 * as one timestamp is about to be written. The max times
2104 * that this can happen is the number of nested interrupts we
2105 * can have. Nesting 10 deep of interrupts is clearly
2108 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2111 if (rb_per_cpu_empty(cpu_buffer))
2114 event = rb_iter_head_event(iter);
2116 switch (event->type) {
2117 case RINGBUF_TYPE_PADDING:
2121 case RINGBUF_TYPE_TIME_EXTEND:
2122 /* Internal data, OK to advance */
2123 rb_advance_iter(iter);
2126 case RINGBUF_TYPE_TIME_STAMP:
2127 /* FIXME: not implemented */
2128 rb_advance_iter(iter);
2131 case RINGBUF_TYPE_DATA:
2133 *ts = iter->read_stamp + event->time_delta;
2134 ring_buffer_normalize_time_stamp(buffer,
2135 cpu_buffer->cpu, ts);
2145 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2148 * ring_buffer_peek - peek at the next event to be read
2149 * @buffer: The ring buffer to read
2150 * @cpu: The cpu to peek at
2151 * @ts: The timestamp counter of this event.
2153 * This will return the event that will be read next, but does
2154 * not consume the data.
2156 struct ring_buffer_event *
2157 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2159 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2160 struct ring_buffer_event *event;
2161 unsigned long flags;
2163 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2166 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2167 event = rb_buffer_peek(buffer, cpu, ts);
2168 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2174 * ring_buffer_iter_peek - peek at the next event to be read
2175 * @iter: The ring buffer iterator
2176 * @ts: The timestamp counter of this event.
2178 * This will return the event that will be read next, but does
2179 * not increment the iterator.
2181 struct ring_buffer_event *
2182 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2184 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2185 struct ring_buffer_event *event;
2186 unsigned long flags;
2188 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2189 event = rb_iter_peek(iter, ts);
2190 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2196 * ring_buffer_consume - return an event and consume it
2197 * @buffer: The ring buffer to get the next event from
2199 * Returns the next event in the ring buffer, and that event is consumed.
2200 * Meaning that sequential reads will keep returning a different event,
2201 * and eventually empty the ring buffer if the producer is slower.
2203 struct ring_buffer_event *
2204 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2206 struct ring_buffer_per_cpu *cpu_buffer;
2207 struct ring_buffer_event *event = NULL;
2208 unsigned long flags;
2210 /* might be called in atomic */
2213 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2216 cpu_buffer = buffer->buffers[cpu];
2217 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2219 event = rb_buffer_peek(buffer, cpu, ts);
2223 rb_advance_reader(cpu_buffer);
2226 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2233 EXPORT_SYMBOL_GPL(ring_buffer_consume);
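/*
 * Illustrative consuming-read loop (a sketch): each call returns the next
 * event for that cpu and removes it from the buffer.  process() is a
 * made-up consumer.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(rb, cpu, &ts))) {
 *		void *data = ring_buffer_event_data(event);
 *		// ts holds the normalized timestamp of this event
 *		process(data, ring_buffer_event_length(event));
 *	}
 */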
2236 * ring_buffer_read_start - start a non consuming read of the buffer
2237 * @buffer: The ring buffer to read from
2238 * @cpu: The cpu buffer to iterate over
2240 * This starts up an iteration through the buffer. It also disables
2241 * the recording to the buffer until the reading is finished.
2242 * This prevents the reading from being corrupted. This is not
2243 * a consuming read, so a producer is not expected.
2245 * Must be paired with ring_buffer_read_finish.
2247 struct ring_buffer_iter *
2248 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2250 struct ring_buffer_per_cpu *cpu_buffer;
2251 struct ring_buffer_iter *iter;
2252 unsigned long flags;
2254 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2257 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2261 cpu_buffer = buffer->buffers[cpu];
2263 iter->cpu_buffer = cpu_buffer;
2265 atomic_inc(&cpu_buffer->record_disabled);
2266 synchronize_sched();
2268 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2269 __raw_spin_lock(&cpu_buffer->lock);
2270 rb_iter_reset(iter);
2271 __raw_spin_unlock(&cpu_buffer->lock);
2272 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2276 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2279 * ring_buffer_finish - finish reading the iterator of the buffer
2280 * @iter: The iterator retrieved by ring_buffer_read_start
2282 * This re-enables the recording to the buffer, and frees the
2286 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2288 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2290 atomic_dec(&cpu_buffer->record_disabled);
2293 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2296 * ring_buffer_read - read the next item in the ring buffer by the iterator
2297 * @iter: The ring buffer iterator
2298 * @ts: The time stamp of the event read.
2300 * This reads the next event in the ring buffer and increments the iterator.
2302 struct ring_buffer_event *
2303 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2305 struct ring_buffer_event *event;
2306 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2307 unsigned long flags;
2309 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2310 event = rb_iter_peek(iter, ts);
2314 rb_advance_iter(iter);
2316 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2320 EXPORT_SYMBOL_GPL(ring_buffer_read);
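/*
 * Illustrative non-consuming read (a sketch): the iterator disables
 * recording for its cpu buffer until ring_buffer_read_finish() is
 * called.  inspect() is a made-up helper.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(rb, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		inspect(ring_buffer_event_data(event));
 *	ring_buffer_read_finish(iter);
 */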
2323 * ring_buffer_size - return the size of the ring buffer (in bytes)
2324 * @buffer: The ring buffer.
2326 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2328 return BUF_PAGE_SIZE * buffer->pages;
2330 EXPORT_SYMBOL_GPL(ring_buffer_size);
2333 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2335 cpu_buffer->head_page
2336 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2337 local_set(&cpu_buffer->head_page->write, 0);
2338 local_set(&cpu_buffer->head_page->page->commit, 0);
2340 cpu_buffer->head_page->read = 0;
2342 cpu_buffer->tail_page = cpu_buffer->head_page;
2343 cpu_buffer->commit_page = cpu_buffer->head_page;
2345 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2346 local_set(&cpu_buffer->reader_page->write, 0);
2347 local_set(&cpu_buffer->reader_page->page->commit, 0);
2348 cpu_buffer->reader_page->read = 0;
2350 cpu_buffer->overrun = 0;
2351 cpu_buffer->entries = 0;
2353 cpu_buffer->write_stamp = 0;
2354 cpu_buffer->read_stamp = 0;
2358 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2359 * @buffer: The ring buffer to reset a per cpu buffer of
2360 * @cpu: The CPU buffer to be reset
2362 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2364 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2365 unsigned long flags;
2367 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2370 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2372 __raw_spin_lock(&cpu_buffer->lock);
2374 rb_reset_cpu(cpu_buffer);
2376 __raw_spin_unlock(&cpu_buffer->lock);
2378 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2380 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2383 * ring_buffer_reset - reset a ring buffer
2384 * @buffer: The ring buffer to reset all cpu buffers
2386 void ring_buffer_reset(struct ring_buffer *buffer)
2390 for_each_buffer_cpu(buffer, cpu)
2391 ring_buffer_reset_cpu(buffer, cpu);
2393 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2396 * ring_buffer_empty - is the ring buffer empty?
2397 * @buffer: The ring buffer to test
2399 int ring_buffer_empty(struct ring_buffer *buffer)
2401 struct ring_buffer_per_cpu *cpu_buffer;
2404 /* yes this is racy, but if you don't like the race, lock the buffer */
2405 for_each_buffer_cpu(buffer, cpu) {
2406 cpu_buffer = buffer->buffers[cpu];
2407 if (!rb_per_cpu_empty(cpu_buffer))
2413 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2416 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2417 * @buffer: The ring buffer
2418 * @cpu: The CPU buffer to test
2420 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2422 struct ring_buffer_per_cpu *cpu_buffer;
2425 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2428 cpu_buffer = buffer->buffers[cpu];
2429 ret = rb_per_cpu_empty(cpu_buffer);
2434 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2437 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2438 * @buffer_a: One buffer to swap with
2439 * @buffer_b: The other buffer to swap with
2441 * This function is useful for tracers that want to take a "snapshot"
2442 * of a CPU buffer and have another backup buffer lying around.
2443 * It is expected that the tracer handles the cpu buffer not being
2444 * used at the moment.
2446 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2447 struct ring_buffer *buffer_b, int cpu)
2449 struct ring_buffer_per_cpu *cpu_buffer_a;
2450 struct ring_buffer_per_cpu *cpu_buffer_b;
2453 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2454 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2457 /* At least make sure the two buffers are somewhat the same */
2458 if (buffer_a->pages != buffer_b->pages)
2463 if (ring_buffer_flags != RB_BUFFERS_ON)
2466 if (atomic_read(&buffer_a->record_disabled))
2469 if (atomic_read(&buffer_b->record_disabled))
2472 cpu_buffer_a = buffer_a->buffers[cpu];
2473 cpu_buffer_b = buffer_b->buffers[cpu];
2475 if (atomic_read(&cpu_buffer_a->record_disabled))
2478 if (atomic_read(&cpu_buffer_b->record_disabled))
2482 * We can't do a synchronize_sched here because this
2483 * function can be called in atomic context.
2484 * Normally this will be called from the same CPU as cpu.
2485 * If not, it's up to the caller to protect this.
2487 atomic_inc(&cpu_buffer_a->record_disabled);
2488 atomic_inc(&cpu_buffer_b->record_disabled);
2490 buffer_a->buffers[cpu] = cpu_buffer_b;
2491 buffer_b->buffers[cpu] = cpu_buffer_a;
2493 cpu_buffer_b->buffer = buffer_a;
2494 cpu_buffer_a->buffer = buffer_b;
2496 atomic_dec(&cpu_buffer_a->record_disabled);
2497 atomic_dec(&cpu_buffer_b->record_disabled);
2503 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
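/*
 * Example (sketch): the "snapshot" pattern described above.  A tracer
 * that keeps a spare buffer around (max_buffer below is hypothetical)
 * can preserve the current contents for one CPU without copying a
 * single byte, then read the snapshot out of max_buffer at leisure:
 *
 *	if (ring_buffer_swap_cpu(trace_buffer, max_buffer, cpu) == 0) {
 *		... read @cpu of max_buffer with ring_buffer_consume()
 *		    or ring_buffer_read_page() ...
 *	}
 *
 * The swap can fail (mismatched sizes, recording disabled, CPU not
 * present in a cpumask), so the return value must always be checked.
 */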
2505 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2506 struct buffer_data_page *bpage,
2507 unsigned int offset)
2509 struct ring_buffer_event *event;
2512 __raw_spin_lock(&cpu_buffer->lock);
2513 for (head = offset; head < local_read(&bpage->commit);
2514 head += rb_event_length(event)) {
2516 event = __rb_data_page_index(bpage, head);
2517 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2519 /* Only count data entries */
2520 if (event->type != RINGBUF_TYPE_DATA)
2522 cpu_buffer->entries--;
2524 __raw_spin_unlock(&cpu_buffer->lock);
2528 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2529 * @buffer: the buffer to allocate for.
2531 * This function is used in conjunction with ring_buffer_read_page.
2532 * When reading a full page from the ring buffer, these functions
2533 * can be used to speed up the process. The calling function should
2534 * allocate a few pages first with this function. Then when it
2535 * needs to get pages from the ring buffer, it passes the result
2536 * of this function into ring_buffer_read_page, which will swap
2537 * the page that was allocated, with the read page of the buffer.
2540 * The page allocated, or NULL on error.
2542 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2544 struct buffer_data_page *bpage;
2547 addr = __get_free_page(GFP_KERNEL);
2551 bpage = (void *)addr;
2553 rb_init_page(bpage);
2559 * ring_buffer_free_read_page - free an allocated read page
2560 * @buffer: the buffer the page was allocated for
2561 * @data: the page to free
2563 * Free a page allocated from ring_buffer_alloc_read_page.
2565 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2567 free_page((unsigned long)data);
2571 * ring_buffer_read_page - extract a page from the ring buffer
2572 * @buffer: buffer to extract from
2573 * @data_page: the page to use, allocated from ring_buffer_alloc_read_page
2574 * @len: amount to extract
2575 * @cpu: the cpu of the buffer to extract
2576 * @full: should the extraction only happen when the page is full.
2578 * This function will pull out a page from the ring buffer and consume it.
2579 * @data_page must be the address of the variable that was returned
2580 * from ring_buffer_alloc_read_page. This is because the page might be used
2581 * to swap with a page in the ring buffer.
2584 * rpage = ring_buffer_alloc_read_page(buffer);
2587 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2589 * process_page(rpage, ret);
2591 * When @full is set, the function will not return any data unless
2592 * the writer is off the reader page.
2594 * Note: it is up to the calling functions to handle sleeps and wakeups.
2595 * The ring buffer can be used anywhere in the kernel and so cannot
2596 * blindly call wake_up. The layer that uses the ring buffer must be
2597 * responsible for that.
2600 * >=0 if data has been transferred, returns the offset of consumed data.
2601 * <0 if no data has been transferred.
2603 int ring_buffer_read_page(struct ring_buffer *buffer,
2604 void **data_page, size_t len, int cpu, int full)
2606 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2607 struct ring_buffer_event *event;
2608 struct buffer_data_page *bpage;
2609 struct buffer_page *reader;
2610 unsigned long flags;
2611 unsigned int commit;
2616 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2620 * If len is not big enough to hold the page header, then
2621 * we can not copy anything.
2623 if (len <= BUF_PAGE_HDR_SIZE)
2626 len -= BUF_PAGE_HDR_SIZE;
2635 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2637 reader = rb_get_reader_page(cpu_buffer);
2641 event = rb_reader_event(cpu_buffer);
2643 read = reader->read;
2644 commit = rb_page_commit(reader);
2647 * If this page has been partially read or
2648 * if len is not big enough to read the rest of the page or
2649 * a writer is still on the page, then
2650 * we must copy the data from the page to the buffer.
2651 * Otherwise, we can simply swap the page with the one passed in.
2653 if (read || (len < (commit - read)) ||
2654 cpu_buffer->reader_page == cpu_buffer->commit_page) {
2655 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2656 unsigned int rpos = read;
2657 unsigned int pos = 0;
2663 if (len > (commit - read))
2664 len = (commit - read);
2666 size = rb_event_length(event);
2671 /* save the current timestamp, since the user will need it */
2672 save_timestamp = cpu_buffer->read_stamp;
2674 /* Need to copy one event at a time */
2676 memcpy(bpage->data + pos, rpage->data + rpos, size);
2680 rb_advance_reader(cpu_buffer);
2681 rpos = reader->read;
2684 event = rb_reader_event(cpu_buffer);
2685 size = rb_event_length(event);
2686 } while (len > size);
2689 local_set(&bpage->commit, pos);
2690 bpage->time_stamp = save_timestamp;
2692 /* we copied everything to the beginning */
2695 /* swap the pages */
2696 rb_init_page(bpage);
2697 bpage = reader->page;
2698 reader->page = *data_page;
2699 local_set(&reader->write, 0);
2703 /* update the entry counter */
2704 rb_remove_entries(cpu_buffer, bpage, read);
2709 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
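/*
 * Example (sketch): a full read cycle with the two helpers above, as a
 * splice or file-read path might do it.  PAGE_SIZE is passed as @len so
 * the page-swap fast path can be used; process_page() stands in for
 * whatever the caller does with the data and is hypothetical.
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 */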
2716 rb_simple_read(struct file *filp, char __user *ubuf,
2717 size_t cnt, loff_t *ppos)
2719 unsigned long *p = filp->private_data;
2723 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2724 r = sprintf(buf, "permanently disabled\n");
2726 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2728 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2732 rb_simple_write(struct file *filp, const char __user *ubuf,
2733 size_t cnt, loff_t *ppos)
2735 unsigned long *p = filp->private_data;
2740 if (cnt >= sizeof(buf))
2743 if (copy_from_user(&buf, ubuf, cnt))
2748 ret = strict_strtoul(buf, 10, &val);
2753 set_bit(RB_BUFFERS_ON_BIT, p);
2755 clear_bit(RB_BUFFERS_ON_BIT, p);
2762 static const struct file_operations rb_simple_fops = {
2763 .open = tracing_open_generic,
2764 .read = rb_simple_read,
2765 .write = rb_simple_write,
2769 static __init int rb_init_debugfs(void)
2771 struct dentry *d_tracer;
2772 struct dentry *entry;
2774 d_tracer = tracing_init_dentry();
2776 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2777 &ring_buffer_flags, &rb_simple_fops);
2779 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2784 fs_initcall(rb_init_debugfs);
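/*
 * Example (sketch): from user space this file acts as a global on/off
 * switch.  Assuming debugfs is mounted at /sys/kernel/debug and
 * tracing_init_dentry() returned the usual tracing/ directory, recording
 * can be stopped and restarted with plain writes:
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	stops all ring buffer recording
 *		write(fd, "1", 1);	allows recording again
 *		close(fd);
 *	}
 */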
2786 #ifdef CONFIG_HOTPLUG_CPU
2787 static int rb_cpu_notify(struct notifier_block *self,
2788 unsigned long action, void *hcpu)
2790 struct ring_buffer *buffer =
2791 container_of(self, struct ring_buffer, cpu_notify);
2792 long cpu = (long)hcpu;
2795 case CPU_UP_PREPARE:
2796 case CPU_UP_PREPARE_FROZEN:
2797 if (cpu_isset(cpu, *buffer->cpumask))
2800 buffer->buffers[cpu] =
2801 rb_allocate_cpu_buffer(buffer, cpu);
2802 if (!buffer->buffers[cpu]) {
2803 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
2808 cpu_set(cpu, *buffer->cpumask);
2810 case CPU_DOWN_PREPARE:
2811 case CPU_DOWN_PREPARE_FROZEN:
2814 * If we were to free the buffer, then the user would
2815 * lose any trace that was in the buffer.