* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/ring_buffer.h>
+#include <linux/trace_clock.h>
+#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
-#include <linux/sched.h> /* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
};
-static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
* tracing_on - enable all tracing buffers
* tracing_off_permanent - permanently disable ring buffers
*
* This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
*/
void tracing_off_permanent(void)
{
set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
+/**
+ * tracing_is_on - show whether the ring buffers are enabled
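+ *
+ * For example (a sketch; the callers shown are hypothetical):
+ *
+ *	tracing_off_permanent();
+ *	...
+ *	if (!tracing_is_on())
+ *		return;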
+ */
+int tracing_is_on(void)
+{
+ return ring_buffer_flags == RB_BUFFERS_ON;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
#include "trace.h"
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
u64 time;
preempt_disable_notrace();
/* shift to debug/test normalization and TIME_EXTENTS */
- time = sched_clock() << DEBUG_SHIFT;
+ time = trace_clock_local() << DEBUG_SHIFT;
preempt_enable_no_resched_notrace();
return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
-#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
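+/* the event header is everything that precedes the event's array payload */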
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA 28
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
- local_t commit; /* write commited index */
+ local_t commit; /* write committed index */
unsigned char data[]; /* data of buffer page */
};
local_set(&bpage->commit, 0);
}
+/**
+ * ring_buffer_page_len - the size of data on the page.
+ * @page: The page to read
+ *
+ * Returns the amount of data on the page, including buffer page header.
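+ *
+ * For example (a sketch; rpage, ret, size and cpu are hypothetical
+ * locals, with rpage allocated by ring_buffer_alloc_read_page()):
+ *
+ *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
+ *	if (ret >= 0)
+ *		size = ring_buffer_page_len(rpage);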
+ */
+size_t ring_buffer_page_len(void *page)
+{
+ return local_read(&((struct buffer_data_page *)page)->commit)
+ + BUF_PAGE_HDR_SIZE;
+}
+
/*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out.
return 0;
}
-#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
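+/* a page holds one buffer_data_page header plus BUF_PAGE_SIZE bytes of data */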
+#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
/*
* head_page == tail_page && head == tail then buffer is empty.
struct list_head pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
- struct buffer_page *commit_page; /* commited pages */
+ struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
unsigned long overrun;
unsigned long entries;
unsigned pages;
unsigned flags;
int cpus;
- cpumask_var_t cpumask;
atomic_t record_disabled;
+ cpumask_var_t cpumask;
struct mutex mutex;
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
struct ring_buffer *buffer = cpu_buffer->buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ bool lock_taken = false;
commit_page = cpu_buffer->commit_page;
/* we just need to protect against interrupts */
struct buffer_page *next_page = tail_page;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ /*
+ * Since the write to the buffer is still not
+ * fully lockless, we must be careful with NMIs.
+ * The locks in the writers are taken when a write
+ * crosses to a new page. The locks protect against
+ * races with the readers (this will soon be fixed
+ * with a lockless solution).
+ *
+ * Because we cannot protect against NMIs, and we
+ * want to keep traces reentrant, we need to manage
+ * what happens when we are in an NMI.
+ *
+ * NMIs can happen after we take the lock.
+ * If we are in an NMI, only take the lock
+ * if it is not already taken. Otherwise
+ * simply fail.
+ */
+ if (unlikely(in_nmi())) {
+ if (!__raw_spin_trylock(&cpu_buffer->lock))
+ goto out_reset;
+ } else
+ __raw_spin_lock(&cpu_buffer->lock);
+
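+ /* record that we hold the lock; out_reset unlocks only when this is set */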
+ lock_taken = true;
rb_inc_page(cpu_buffer, &next_page);
/* we grabbed the lock before incrementing */
if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
- goto out_unlock;
+ goto out_reset;
/*
* If for some reason, we had an interrupt storm that made
*/
if (unlikely(next_page == commit_page)) {
WARN_ON_ONCE(1);
- goto out_unlock;
+ goto out_reset;
}
if (next_page == head_page) {
if (!(buffer->flags & RB_FL_OVERWRITE))
- goto out_unlock;
+ goto out_reset;
/* tail_page has not moved yet? */
if (tail_page == cpu_buffer->tail_page) {
return event;
- out_unlock:
+ out_reset:
/* reset write */
if (tail <= BUF_PAGE_SIZE)
local_set(&tail_page->write, tail);
- __raw_spin_unlock(&cpu_buffer->lock);
+ if (likely(lock_taken))
+ __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_data_page *bpage)
+ struct buffer_data_page *bpage,
+ unsigned int offset)
{
struct ring_buffer_event *event;
unsigned long head;
__raw_spin_lock(&cpu_buffer->lock);
- for (head = 0; head < local_read(&bpage->commit);
+ for (head = offset; head < local_read(&bpage->commit);
head += rb_event_length(event)) {
event = __rb_data_page_index(bpage, head);
*/
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
- unsigned long addr;
struct buffer_data_page *bpage;
+ unsigned long addr;
addr = __get_free_page(GFP_KERNEL);
if (!addr)
bpage = (void *)addr;
+ rb_init_page(bpage);
+
return bpage;
}
* ring_buffer_read_page - extract a page from the ring buffer
* @buffer: buffer to extract from
* @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @len: amount to extract
* @cpu: the cpu of the buffer to extract
* @full: should the extraction only happen when the page is full.
*
* to swap with a page in the ring buffer.
*
* for example:
- * rpage = ring_buffer_alloc_page(buffer);
+ * rpage = ring_buffer_alloc_read_page(buffer);
* if (!rpage)
* return error;
- * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- * if (ret)
- * process_page(rpage);
+ * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ * if (ret >= 0)
+ * process_page(rpage, ret);
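+ *
+ * and, when the caller is done with the page, it should be freed:
+ *
+ *	ring_buffer_free_read_page(buffer, rpage);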
*
- * When @full is set, the function will not return true unless
+ * When @full is set, the function will not succeed unless
* the writer is off the reader page.
* responsible for that.
*
* Returns:
- * 1 if data has been transferred
- * 0 if no data has been transferred.
+ *  >=0 if data has been transferred; the value is the offset of the consumed data.
+ * <0 if no data has been transferred.
*/
int ring_buffer_read_page(struct ring_buffer *buffer,
- void **data_page, int cpu, int full)
+ void **data_page, size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
struct buffer_data_page *bpage;
+ struct buffer_page *reader;
unsigned long flags;
- int ret = 0;
+ unsigned int commit;
+ unsigned int read;
+ int ret = -1;
+
+ /*
+ * If len is not big enough to hold the page header, then
+ * we cannot copy anything.
+ */
+ if (len <= BUF_PAGE_HDR_SIZE)
+ return -1;
+
+ len -= BUF_PAGE_HDR_SIZE;
if (!data_page)
- return 0;
+ return -1;
bpage = *data_page;
if (!bpage)
- return 0;
+ return -1;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- /*
- * rb_buffer_peek will get the next ring buffer if
- * the current reader page is empty.
- */
- event = rb_buffer_peek(buffer, cpu, NULL);
- if (!event)
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
goto out;
- /* check for data */
- if (!local_read(&cpu_buffer->reader_page->page->commit))
- goto out;
+ event = rb_reader_event(cpu_buffer);
+
+ read = reader->read;
+ commit = rb_page_commit(reader);
+
/*
- * If the writer is already off of the read page, then simply
- * switch the read page with the given page. Otherwise
- * we need to copy the data from the reader to the writer.
+ * If this page has been partially read, or if len is not big
+ * enough to read the rest of the page, or if a writer is still
+ * on the page, then we must copy the data from the page to the
+ * buffer. Otherwise, we can simply swap the page with the one
+ * passed in.
*/
- if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
- unsigned int read = cpu_buffer->reader_page->read;
+ if (read || (len < (commit - read)) ||
+ cpu_buffer->reader_page == cpu_buffer->commit_page) {
+ struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+ unsigned int rpos = read;
+ unsigned int pos = 0;
+ unsigned int size;
if (full)
goto out;
- /* The writer is still on the reader page, we must copy */
- bpage = cpu_buffer->reader_page->page;
- memcpy(bpage->data,
- cpu_buffer->reader_page->page->data + read,
- local_read(&bpage->commit) - read);
- /* consume what was read */
- cpu_buffer->reader_page += read;
+ if (len > (commit - read))
+ len = (commit - read);
+
+ size = rb_event_length(event);
+
+ if (len < size)
+ goto out;
+
+ /* Need to copy one event at a time */
+ do {
+ memcpy(bpage->data + pos, rpage->data + rpos, size);
+
+ len -= size;
+
+ rb_advance_reader(cpu_buffer);
+ rpos = reader->read;
+ pos += size;
+ event = rb_reader_event(cpu_buffer);
+ size = rb_event_length(event);
+ } while (len > size);
+
+ /* update bpage */
+ local_set(&bpage->commit, pos);
+ bpage->time_stamp = rpage->time_stamp;
+
+ /* we copied everything to the beginning */
+ read = 0;
} else {
/* swap the pages */
rb_init_page(bpage);
- bpage = cpu_buffer->reader_page->page;
- cpu_buffer->reader_page->page = *data_page;
- cpu_buffer->reader_page->read = 0;
+ bpage = reader->page;
+ reader->page = *data_page;
+ local_set(&reader->write, 0);
+ reader->read = 0;
*data_page = bpage;
+
+ /* update the entry counter */
+ rb_remove_entries(cpu_buffer, bpage, read);
}
- ret = 1;
+ ret = read;
- /* update the entry counter */
- rb_remove_entries(cpu_buffer, bpage);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- long *p = filp->private_data;
+ unsigned long *p = filp->private_data;
char buf[64];
int r;
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- long *p = filp->private_data;
+ unsigned long *p = filp->private_data;
char buf[64];
- long val;
+ unsigned long val;
int ret;
if (cnt >= sizeof(buf))