Merge branch 'linus' into tracing/core
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3ffa502..5885cdf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -206,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE       8U      /* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT      0
+# define RB_ARCH_ALIGNMENT             RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT      1
+# define RB_ARCH_ALIGNMENT             8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -309,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define TS_MASK                ((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST  (~TS_MASK)
 
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS       (1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED       (1 << 30)
+
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
@@ -328,6 +343,7 @@ struct buffer_page {
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
+       unsigned long    real_end;      /* real end of data */
        struct buffer_data_page *page;  /* Actual data page */
 };
 
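
The two new flag bits are carried in the high bits of buffer_data_page->commit
when a full page is handed to a reader, leaving the low bits for the data
length. A minimal sketch of the split, using a hypothetical helper that is not
part of this patch:

	/* Hypothetical helper: strip the missed-event flags to
	 * recover the true data length of a handed-off page.
	 */
	static inline unsigned long rb_commit_length(unsigned long commit)
	{
		return commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
	}

The flags only appear on pages returned by ring_buffer_read_page() (see the
final hunk below); commit values seen on the writer path never carry them.
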
@@ -397,18 +413,27 @@ int ring_buffer_print_page_header(struct trace_seq *s)
        int ret;
 
        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-                              "offset:0;\tsize:%u;\n",
-                              (unsigned int)sizeof(field.time_stamp));
+                              "offset:0;\tsize:%u;\tsigned:%u;\n",
+                              (unsigned int)sizeof(field.time_stamp),
+                              (unsigned int)is_signed_type(u64));
 
        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-                              "offset:%u;\tsize:%u;\n",
+                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                              (unsigned int)offsetof(typeof(field), commit),
+                              (unsigned int)sizeof(field.commit),
+                              (unsigned int)is_signed_type(long));
+
+       ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
-                              (unsigned int)sizeof(field.commit));
+                              1,
+                              (unsigned int)is_signed_type(long));
 
        ret = trace_seq_printf(s, "\tfield: char data;\t"
-                              "offset:%u;\tsize:%u;\n",
+                              "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), data),
-                              (unsigned int)BUF_PAGE_SIZE);
+                              (unsigned int)BUF_PAGE_SIZE,
+                              (unsigned int)is_signed_type(char));
 
        return ret;
 }
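
With the signed:%u additions, the page header now reports signedness per
field. On x86_64 with 4 KB pages (an assumed configuration; offsets, sizes,
and char signedness all vary by architecture), the emitted text would look
roughly like:

	field: u64 timestamp;	offset:0;	size:8;	signed:0;
	field: local_t commit;	offset:8;	size:8;	signed:1;
	field: int overwrite;	offset:8;	size:1;	signed:1;
	field: char data;	offset:16;	size:4080;	signed:1;

Note that overwrite deliberately reports the same offset as commit: the
missed-events flags defined above live in commit's high bits rather than in a
separate field.
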
@@ -420,13 +445,15 @@ struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
-       raw_spinlock_t                  lock;
+       arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
+       unsigned long                   lost_events;
+       unsigned long                   last_overrun;
        local_t                         commit_overrun;
        local_t                         overrun;
        local_t                         entries;
@@ -461,6 +488,8 @@ struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
+       struct buffer_page              *cache_reader_page;
+       unsigned long                   cache_read;
        u64                             read_stamp;
 };
 
@@ -995,7 +1024,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-       cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
@@ -1190,28 +1219,25 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
+       spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-                       return;
+                       goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-               return;
+               goto out;
 
        rb_reset_cpu(cpu_buffer);
-
        rb_check_pages(cpu_buffer);
 
-       atomic_dec(&cpu_buffer->record_disabled);
-
+out:
+       spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1222,26 +1248,22 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-                       return;
+                       goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                list_add_tail(&bpage->list, cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);
-       spin_unlock_irq(&cpu_buffer->reader_lock);
-
        rb_check_pages(cpu_buffer);
 
-       atomic_dec(&cpu_buffer->record_disabled);
+out:
+       spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1249,11 +1271,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1285,6 +1302,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        if (size == buffer_size)
                return size;
 
+       atomic_inc(&buffer->record_disabled);
+
+       /* Make sure all writers are done with this buffer. */
+       synchronize_sched();
+
        mutex_lock(&buffer->mutex);
        get_online_cpus();
 
@@ -1347,6 +1369,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
 
+       atomic_dec(&buffer->record_disabled);
+
        return size;
 
  free_pages:
@@ -1356,6 +1380,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        }
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -ENOMEM;
 
        /*
@@ -1365,6 +1390,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
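
Taken together, these resize hunks hoist the writer quiescing out of
rb_remove_pages()/rb_insert_pages() and into ring_buffer_resize() itself, so
the synchronize_sched() cost is paid once per resize rather than once per CPU,
while the page-list surgery is now serialized against readers by reader_lock.
Reduced to a sketch (error paths elided; see the hunks above for the real
flow):

	atomic_inc(&buffer->record_disabled);	/* block new writers */
	synchronize_sched();			/* wait out in-flight ones */

	mutex_lock(&buffer->mutex);
	get_online_cpus();
	/* add or remove pages, holding each cpu_buffer->reader_lock */
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);	/* writing may resume */
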
@@ -1546,7 +1572,7 @@ rb_update_event(struct ring_buffer_event *event,
 
        case 0:
                length -= RB_EVNT_HDR_SIZE;
-               if (length > RB_MAX_SMALL_DATA)
+               if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                        event->array[0] = length;
                else
                        event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1721,11 +1747,11 @@ static unsigned rb_calculate_event_length(unsigned length)
        if (!length)
                length = 1;
 
-       if (length > RB_MAX_SMALL_DATA)
+       if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                length += sizeof(event.array[0]);
 
        length += RB_EVNT_HDR_SIZE;
-       length = ALIGN(length, RB_ALIGNMENT);
+       length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
        return length;
 }
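
A worked example of the alignment change, assuming RB_EVNT_HDR_SIZE is 4 and
RB_ALIGNMENT is 4 as defined in this file:

	/*
	 * 12-byte payload, RB_FORCE_8BYTE_ALIGNMENT == 0:
	 *	12 fits in type_len, so no array[0] word is added;
	 *	12 + 4 (header) = 16; ALIGN(16, 4) -> 16 bytes
	 *
	 * Same payload, RB_FORCE_8BYTE_ALIGNMENT == 1:
	 *	the size is forced into array[0]: 12 + 4 = 16;
	 *	16 + 4 (header) = 20; ALIGN(20, 8) -> 24 bytes
	 */

The extra padding keeps 64-bit loads within the payload naturally aligned on
architectures that cannot take unaligned accesses cheaply.
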
@@ -1750,6 +1776,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        kmemcheck_annotate_bitfield(event, bitfield);
 
        /*
+        * Save the original length to the meta data.
+        * This will be used by the reader to add the lost
+        * event counter.
+        */
+       tail_page->real_end = tail;
+
+       /*
         * If this event is bigger than the minimum size, then
         * we need to be careful that we don't subtract the
         * write counter enough to allow another writer to slip
@@ -1785,9 +1818,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
             unsigned long length, unsigned long tail,
-            struct buffer_page *commit_page,
             struct buffer_page *tail_page, u64 *ts)
 {
+       struct buffer_page *commit_page = cpu_buffer->commit_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct buffer_page *next_page;
        int ret;
@@ -1890,13 +1923,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  unsigned type, unsigned long length, u64 *ts)
 {
-       struct buffer_page *tail_page, *commit_page;
+       struct buffer_page *tail_page;
        struct ring_buffer_event *event;
        unsigned long tail, write;
 
-       commit_page = cpu_buffer->commit_page;
-       /* we just need to protect against interrupts */
-       barrier();
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
 
@@ -1907,7 +1937,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        /* See if we shot past the end of this buffer page */
        if (write > BUF_PAGE_SIZE)
                return rb_move_tail(cpu_buffer, length, tail,
-                                   commit_page, tail_page, ts);
+                                   tail_page, ts);
 
        /* We reserved something on the buffer */
 
@@ -2235,12 +2265,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
-       if (atomic_read(&buffer->record_disabled))
-               return NULL;
-
        /* If we are tracing schedule, we don't want to recurse */
        resched = ftrace_preempt_disable();
 
+       if (atomic_read(&buffer->record_disabled))
+               goto out_nocheck;
+
        if (trace_recursive_lock())
                goto out_nocheck;
 
@@ -2472,11 +2502,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
-       if (atomic_read(&buffer->record_disabled))
-               return -EBUSY;
-
        resched = ftrace_preempt_disable();
 
+       if (atomic_read(&buffer->record_disabled))
+               goto out;
+
        cpu = raw_smp_processor_id();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -2544,7 +2574,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2580,7 +2610,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
@@ -2721,6 +2751,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
                iter->read_stamp = cpu_buffer->read_stamp;
        else
                iter->read_stamp = iter->head_page->page->time_stamp;
+       iter->cache_reader_page = cpu_buffer->reader_page;
+       iter->cache_read = cpu_buffer->read;
 }
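
These two cached values pair with the staleness check added to rb_iter_peek()
further down: a consuming read swaps the reader page and advances
cpu_buffer->read, which would leave an existing iterator pointing into
recycled pages, so the cache lets the iterator detect this and rebuild itself.
A hypothetical interleaving, not from this patch:

	iter = ring_buffer_read_start(buffer, cpu);  /* caches page + read */
	ring_buffer_consume(buffer, cpu, &ts, NULL); /* consuming read */
	ring_buffer_iter_peek(iter, &ts);            /* cache mismatch ->
						      * rb_iter_reset() */
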
 
 /**
@@ -2827,12 +2859,13 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct buffer_page *reader = NULL;
+       unsigned long overwrite;
        unsigned long flags;
        int nr_loops = 0;
        int ret;
 
        local_irq_save(flags);
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
 
  again:
        /*
@@ -2868,13 +2901,14 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
+       cpu_buffer->reader_page->real_end = 0;
 
  spin:
        /*
         * Splice the empty reader page into the list around the head.
         */
        reader = rb_set_head_page(cpu_buffer);
-       cpu_buffer->reader_page->list.next = reader->list.next;
+       cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
        cpu_buffer->reader_page->list.prev = reader->list.prev;
 
        /*
@@ -2888,6 +2922,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
 
        /*
+        * We want to make sure we read the overruns after we set up our
+        * pointers to the next object. The writer side does a
+        * cmpxchg to cross pages which acts as the mb on the writer
+        * side. Note, the reader will constantly fail the swap
+        * while the writer is updating the pointers, so this
+        * guarantees that the overwrite recorded here is the one we
+        * want to compare with the last_overrun.
+        */
+       smp_mb();
+       overwrite = local_read(&(cpu_buffer->overrun));
+
+       /*
         * Here's the tricky part.
         *
         * We need to move the pointer past the header page.
@@ -2911,17 +2957,22 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         *
         * Now make the new head point back to the reader page.
         */
-       reader->list.next->prev = &cpu_buffer->reader_page->list;
+       rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
        rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
        rb_reset_reader_page(cpu_buffer);
 
+       if (overwrite != cpu_buffer->last_overrun) {
+               cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+               cpu_buffer->last_overrun = overwrite;
+       }
+
        goto again;
 
  out:
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
        return reader;
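
The ordering the barrier comment in this hunk relies on, reduced to its
reader/writer pairing (a sketch that only restates the comment's own claims):

	/*
	 * writer, crossing pages:	reader, here:
	 *
	 *	bump overrun		splice in reader page
	 *	cmpxchg (full mb)	smp_mb()
	 *				overwrite = local_read(&overrun)
	 */

A reader racing with the writer's pointer updates keeps failing its page swap
and loops, so the overwrite snapshot is consistent with the page it finally
obtained; the delta against last_overrun becomes lost_events.
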
@@ -2994,8 +3045,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
                rb_advance_iter(iter);
 }
 
+static unsigned long rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+       return cpu_buffer->lost_events;
+}
+
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+              unsigned long *lost_events)
 {
        struct ring_buffer_event *event;
        struct buffer_page *reader;
@@ -3047,6 +3104,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
                        ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
                                                         cpu_buffer->cpu, ts);
                }
+               if (lost_events)
+                       *lost_events = rb_lost_events(cpu_buffer);
                return event;
 
        default:
@@ -3065,13 +3124,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        struct ring_buffer_event *event;
        int nr_loops = 0;
 
-       if (ring_buffer_iter_empty(iter))
-               return NULL;
-
        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;
 
+       /*
+        * Check if someone performed a consuming read on
+        * the buffer. A consuming read invalidates the iterator
+        * and we need to reset the iterator in this case.
+        */
+       if (unlikely(iter->cache_read != cpu_buffer->read ||
+                    iter->cache_reader_page != cpu_buffer->reader_page))
+               rb_iter_reset(iter);
+
  again:
+       if (ring_buffer_iter_empty(iter))
+               return NULL;
+
        /*
         * We repeat when a timestamp is encountered.
         * We can get multiple timestamps by nested interrupts or also
@@ -3086,6 +3154,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
 
+       if (iter->head >= local_read(&iter->head_page->page->commit)) {
+               rb_inc_iter(iter);
+               goto again;
+       }
+
        event = rb_iter_head_event(iter);
 
        switch (event->type_len) {
@@ -3143,12 +3216,14 @@ static inline int rb_ok_to_lock(void)
  * @buffer: The ring buffer to read
  * @cpu: The cpu to peek at
  * @ts: The timestamp counter of this event.
+ * @lost_events: a variable to store the number of lost events (may be NULL)
  *
  * This will return the event that will be read next, but does
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+                unsigned long *lost_events)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
@@ -3163,7 +3238,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        local_irq_save(flags);
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
-       event = rb_buffer_peek(cpu_buffer, ts);
+       event = rb_buffer_peek(cpu_buffer, ts, lost_events);
        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                rb_advance_reader(cpu_buffer);
        if (dolock)
@@ -3205,13 +3280,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
+ * @cpu: the cpu to read the buffer from
+ * @ts: a variable to store the timestamp (may be NULL)
+ * @lost_events: a variable to store the number of lost events (may be NULL)
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning that sequential reads will keep returning a different event,
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+                   unsigned long *lost_events)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event = NULL;
@@ -3232,9 +3311,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
 
-       event = rb_buffer_peek(cpu_buffer, ts);
-       if (event)
+       event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+       if (event) {
+               cpu_buffer->lost_events = 0;
                rb_advance_reader(cpu_buffer);
+       }
 
        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
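
Clearing lost_events after a successful peek means the count is reported
exactly once, to the consumer that drains the event. A hedged usage sketch of
the widened API; process_event() is a placeholder, not part of this patch:

	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			printk(KERN_WARNING
			       "%lu events lost before this one\n", lost);
		process_event(event, ts);	/* hypothetical consumer */
	}
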
@@ -3284,9 +3365,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
        synchronize_sched();
 
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
        rb_iter_reset(iter);
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        return iter;
@@ -3383,6 +3464,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
        cpu_buffer->write_stamp = 0;
        cpu_buffer->read_stamp = 0;
 
+       cpu_buffer->lost_events = 0;
+       cpu_buffer->last_overrun = 0;
+
        rb_head_page_activate(cpu_buffer);
 }
 
@@ -3406,11 +3490,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
                goto out;
 
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
 
        rb_reset_cpu(cpu_buffer);
 
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
 
  out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -3658,6 +3742,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
        struct ring_buffer_event *event;
        struct buffer_data_page *bpage;
        struct buffer_page *reader;
+       unsigned long missed_events;
        unsigned long flags;
        unsigned int commit;
        unsigned int read;
@@ -3694,6 +3779,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
        read = reader->read;
        commit = rb_page_commit(reader);
 
+       /* Check if any events were dropped */
+       missed_events = cpu_buffer->lost_events;
+
        /*
         * If this page has been partially read or
         * if len is not big enough to read the rest of the page or
@@ -3754,9 +3842,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                local_set(&reader->entries, 0);
                reader->read = 0;
                *data_page = bpage;
+
+               /*
+                * Use the real_end for the data size.
+                * This gives us a chance to store the lost events
+                * on the page.
+                */
+               if (reader->real_end)
+                       local_set(&bpage->commit, reader->real_end);
        }
        ret = read;
 
+       cpu_buffer->lost_events = 0;
+       /*
+        * Set a flag in the commit field if we lost events
+        */
+       if (missed_events) {
+               commit = local_read(&bpage->commit);
+
+               /*
+                * If there is room at the end of the page to save the
+                * missed events, then record it there.
+                */
+               if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+                       memcpy(&bpage->data[commit], &missed_events,
+                              sizeof(missed_events));
+                       local_add(RB_MISSED_STORED, &bpage->commit);
+               }
+               local_add(RB_MISSED_EVENTS, &bpage->commit);
+       }
+
  out_unlock:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
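
A consumer of ring_buffer_read_page() decodes the flags by reversing the steps
above. A hypothetical reader, reusing the rb_commit_length() sketch from the
earlier note:

	unsigned long commit = local_read(&bpage->commit);
	unsigned long len = rb_commit_length(commit);	/* earlier sketch */
	unsigned long missed = 0;

	if (commit & RB_MISSED_EVENTS) {
		if (commit & RB_MISSED_STORED)
			/* the count was appended just past the data */
			memcpy(&missed, &bpage->data[len], sizeof(missed));
		/* else: events were lost but the count did not fit */
	}
	/* len is the real data length; missed is the drop count */
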