#include <linux/cpu.h>
#include <linux/fs.h>
+#include <asm/local.h>
#include "trace.h"
/*
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long head;
struct buffer_page *head_page;
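+ /* snapshot of the reader page and read count, used to detect a consuming read */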
+ struct buffer_page *cache_reader_page;
+ unsigned long cache_read;
u64 read_stamp;
};
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
struct list_head *p;
unsigned i;
- atomic_inc(&cpu_buffer->record_disabled);
- synchronize_sched();
-
spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
return;
rb_reset_cpu(cpu_buffer);
- spin_unlock_irq(&cpu_buffer->reader_lock);
-
rb_check_pages(cpu_buffer);
- atomic_dec(&cpu_buffer->record_disabled);
-
+ spin_unlock_irq(&cpu_buffer->reader_lock);
}
static void
struct list_head *p;
unsigned i;
- atomic_inc(&cpu_buffer->record_disabled);
- synchronize_sched();
-
spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
list_add_tail(&bpage->list, cpu_buffer->pages);
}
rb_reset_cpu(cpu_buffer);
- spin_unlock_irq(&cpu_buffer->reader_lock);
-
rb_check_pages(cpu_buffer);
- atomic_dec(&cpu_buffer->record_disabled);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
}
/**
* @buffer: the buffer to resize.
* @size: the new size.
*
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
* Minimum size is 2 * BUF_PAGE_SIZE.
*
* Returns -1 on failure.
if (size == buffer_size)
return size;
+ atomic_inc(&buffer->record_disabled);
+
+ /* Make sure all writers are done with this buffer. */
+ synchronize_sched();
+
mutex_lock(&buffer->mutex);
get_online_cpus();
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
+
return size;
free_pages:
}
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
return -ENOMEM;
/*
out_fail:
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
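/*
 * Illustration only, not part of this patch: a minimal resize sketch, assuming
 * a buffer created with ring_buffer_alloc() and the declarations from
 * <linux/ring_buffer.h>.  With the hunks above, ring_buffer_resize() itself
 * bumps record_disabled and calls synchronize_sched() before touching the
 * page lists, so the caller no longer has to quiesce writers first.
 */
static int example_double_buffer_size(struct ring_buffer *buffer)
{
	unsigned long new_size = 2 * ring_buffer_size(buffer);

	/* Returns the new size on success, a negative value on failure. */
	return ring_buffer_resize(buffer, new_size);
}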
* @buffer: The ring buffer to enable writes
*
* Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
* @cpu: The CPU to enable.
*
* Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
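/*
 * Illustration only, not part of this patch: the comments above note that
 * record_disable()/record_enable() nest like preempt_disable().  A sketch,
 * assuming <linux/ring_buffer.h>; each disable must be paired with an enable
 * before recording on that buffer (or CPU) resumes.
 */
static void example_quiesce_cpu(struct ring_buffer *buffer, int cpu)
{
	ring_buffer_record_disable(buffer);		/* whole buffer off */
	ring_buffer_record_disable_cpu(buffer, cpu);	/* this CPU off too */

	/* ... inspect or reset the buffer while writers are held off ... */

	ring_buffer_record_enable_cpu(buffer, cpu);	/* still globally disabled */
	ring_buffer_record_enable(buffer);		/* writes resume here */
}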
iter->read_stamp = cpu_buffer->read_stamp;
else
iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->cache_reader_page = cpu_buffer->reader_page;
+ iter->cache_read = cpu_buffer->read;
}
/**
int ret;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
again:
/*
* Splice the empty reader page into the list around the head.
*/
reader = rb_set_head_page(cpu_buffer);
- cpu_buffer->reader_page->list.next = reader->list.next;
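+ /*
+ * The ->next pointer may carry the list flag bits used to mark the
+ * head page; mask them with rb_list_head() before following it.
+ */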
+ cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer->reader_page->list.prev = reader->list.prev;
/*
*
* Now make the new head point back to the reader page.
*/
- reader->list.next->prev = &cpu_buffer->reader_page->list;
+ rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
/* Finally update the reader page to the new head */
goto again;
out:
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
struct ring_buffer_event *event;
int nr_loops = 0;
- if (ring_buffer_iter_empty(iter))
- return NULL;
-
cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;
+ /*
+ * Check whether someone performed a consuming read on
+ * the buffer. A consuming read invalidates the iterator,
+ * so it must be reset in that case.
+ */
+ if (unlikely(iter->cache_read != cpu_buffer->read ||
+ iter->cache_reader_page != cpu_buffer->reader_page))
+ rb_iter_reset(iter);
+
again:
+ if (ring_buffer_iter_empty(iter))
+ return NULL;
+
/*
* We repeat when a timestamp is encountered.
* We can get multiple timestamps by nested interrupts or also
if (rb_per_cpu_empty(cpu_buffer))
return NULL;
+ if (iter->head >= local_read(&iter->head_page->page->commit)) {
+ rb_inc_iter(iter);
+ goto again;
+ }
+
event = rb_iter_head_event(iter);
switch (event->type_len) {
synchronize_sched();
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
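/*
 * Illustration only, not part of this patch: a minimal non-consuming read
 * loop over one CPU, assuming <linux/ring_buffer.h> and <linux/kernel.h>.
 * If another thread does a consuming read (e.g. ring_buffer_consume()) in
 * parallel, the cached reader page/read count no longer match and
 * rb_iter_peek() resets the iterator instead of walking a stale reader page.
 */
static void example_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	/* ring_buffer_read() advances the iterator without consuming data */
	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("event: len=%u ts=%llu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts);

	ring_buffer_read_finish(iter);
}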
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);