ring-buffer: move calculation of event length
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c792ea8..493cba4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -321,9 +321,10 @@ struct buffer_data_page {
 };
 
 struct buffer_page {
+       struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
-       struct list_head list;          /* list of free pages */
+       local_t          entries;       /* entries on this page */
        struct buffer_data_page *page;  /* Actual data page */
 };
 
@@ -366,6 +367,9 @@ static inline int test_time_stamp(u64 delta)
 
 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 
+/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
+#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
+
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
        struct buffer_data_page field;
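
The two u32s subtracted here are the worst-case event header: one word holding type_len plus the time delta, and, for events too large to encode their size in type_len, a second word carrying an explicit 32-bit length. A standalone sketch of the arithmetic; the page and header sizes are illustrative (a 4K page and the 16-byte buffer_data_page header of a 64-bit build), and the _EX names are mine, not from this tree:

#include <stdio.h>

#define PAGE_SIZE_EX		4096u	/* illustrative 4K page */
#define BUF_PAGE_HDR_SIZE_EX	16u	/* u64 time_stamp + local_t commit */
#define BUF_PAGE_SIZE_EX	(PAGE_SIZE_EX - BUF_PAGE_HDR_SIZE_EX)
#define BUF_MAX_DATA_SIZE_EX	(BUF_PAGE_SIZE_EX - (sizeof(unsigned int) * 2))

int main(void)
{
	/* 4096 - 16 = 4080 usable bytes; the worst-case event header
	 * (type_len word + explicit length word) eats 8 more: 4072. */
	printf("max payload: %zu\n", (size_t)BUF_MAX_DATA_SIZE_EX);
	return 0;
}

With this constant, ring_buffer_lock_reserve() and ring_buffer_write() below can reject an oversized payload up front with a single compare, instead of first computing the padded event length.
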
@@ -977,30 +981,6 @@ static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
        return rb_page_commit(cpu_buffer->head_page);
 }
 
-/*
- * When the tail hits the head and the buffer is in overwrite mode,
- * the head jumps to the next page and all content on the previous
- * page is discarded. But before doing so, we update the overrun
- * variable of the buffer.
- */
-static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
-{
-       struct ring_buffer_event *event;
-       unsigned long head;
-
-       for (head = 0; head < rb_head_size(cpu_buffer);
-            head += rb_event_length(event)) {
-
-               event = __rb_page_index(cpu_buffer->head_page, head);
-               if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
-                       return;
-               /* Only count data entries */
-               if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
-                       continue;
-               cpu_buffer->overrun++;
-       }
-}
-
 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
 {
@@ -1177,133 +1157,156 @@ static unsigned rb_calculate_event_length(unsigned length)
        return length;
 }
 
+
 static struct ring_buffer_event *
-__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-                 unsigned type, unsigned long length, u64 *ts)
+rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+            unsigned long length, unsigned long tail,
+            struct buffer_page *commit_page,
+            struct buffer_page *tail_page, u64 *ts)
 {
-       struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
-       unsigned long tail, write;
+       struct buffer_page *next_page, *head_page, *reader_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct ring_buffer_event *event;
-       unsigned long flags;
        bool lock_taken = false;
+       unsigned long flags;
 
-       commit_page = cpu_buffer->commit_page;
-       /* we just need to protect against interrupts */
-       barrier();
-       tail_page = cpu_buffer->tail_page;
-       write = local_add_return(length, &tail_page->write);
-       tail = write - length;
+       next_page = tail_page;
 
-       /* See if we shot pass the end of this buffer page */
-       if (write > BUF_PAGE_SIZE) {
-               struct buffer_page *next_page = tail_page;
+       local_irq_save(flags);
+       /*
+        * Since the write to the buffer is still not
+        * fully lockless, we must be careful with NMIs.
+        * The locks in the writers are taken when a write
+        * crosses to a new page. The locks protect against
+        * races with the readers (this will soon be fixed
+        * with a lockless solution).
+        *
+        * Because we cannot protect against NMIs, and we
+        * want to keep traces reentrant, we need to manage
+        * what happens when we are in an NMI.
+        *
+        * NMIs can happen after we take the lock.
+        * If we are in an NMI, only take the lock
+        * if it is not already taken. Otherwise
+        * simply fail.
+        */
+       if (unlikely(in_nmi())) {
+               if (!__raw_spin_trylock(&cpu_buffer->lock)) {
+                       cpu_buffer->nmi_dropped++;
+                       goto out_reset;
+               }
+       } else
+               __raw_spin_lock(&cpu_buffer->lock);
 
-               local_irq_save(flags);
-               /*
-                * Since the write to the buffer is still not
-                * fully lockless, we must be careful with NMIs.
-                * The locks in the writers are taken when a write
-                * crosses to a new page. The locks protect against
-                * races with the readers (this will soon be fixed
-                * with a lockless solution).
-                *
-                * Because we can not protect against NMIs, and we
-                * want to keep traces reentrant, we need to manage
-                * what happens when we are in an NMI.
-                *
-                * NMIs can happen after we take the lock.
-                * If we are in an NMI, only take the lock
-                * if it is not already taken. Otherwise
-                * simply fail.
-                */
-               if (unlikely(in_nmi())) {
-                       if (!__raw_spin_trylock(&cpu_buffer->lock)) {
-                               cpu_buffer->nmi_dropped++;
-                               goto out_reset;
-                       }
-               } else
-                       __raw_spin_lock(&cpu_buffer->lock);
+       lock_taken = true;
 
-               lock_taken = true;
+       rb_inc_page(cpu_buffer, &next_page);
 
-               rb_inc_page(cpu_buffer, &next_page);
+       head_page = cpu_buffer->head_page;
+       reader_page = cpu_buffer->reader_page;
 
-               head_page = cpu_buffer->head_page;
-               reader_page = cpu_buffer->reader_page;
+       /* we grabbed the lock before incrementing */
+       if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+               goto out_reset;
 
-               /* we grabbed the lock before incrementing */
-               if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
-                       goto out_reset;
+       /*
+        * If, for some reason, we had an interrupt storm that made
+        * it all the way around the buffer, bail, and warn
+        * about it.
+        */
+       if (unlikely(next_page == commit_page)) {
+               cpu_buffer->commit_overrun++;
+               goto out_reset;
+       }
 
-               /*
-                * If for some reason, we had an interrupt storm that made
-                * it all the way around the buffer, bail, and warn
-                * about it.
-                */
-               if (unlikely(next_page == commit_page)) {
-                       cpu_buffer->commit_overrun++;
+       if (next_page == head_page) {
+               if (!(buffer->flags & RB_FL_OVERWRITE))
                        goto out_reset;
-               }
 
-               if (next_page == head_page) {
-                       if (!(buffer->flags & RB_FL_OVERWRITE))
-                               goto out_reset;
-
-                       /* tail_page has not moved yet? */
-                       if (tail_page == cpu_buffer->tail_page) {
-                               /* count overflows */
-                               rb_update_overflow(cpu_buffer);
+               /* tail_page has not moved yet? */
+               if (tail_page == cpu_buffer->tail_page) {
+                       /* count overflows */
+                       cpu_buffer->overrun +=
+                               local_read(&head_page->entries);
 
-                               rb_inc_page(cpu_buffer, &head_page);
-                               cpu_buffer->head_page = head_page;
-                               cpu_buffer->head_page->read = 0;
-                       }
+                       rb_inc_page(cpu_buffer, &head_page);
+                       cpu_buffer->head_page = head_page;
+                       cpu_buffer->head_page->read = 0;
                }
+       }
 
-               /*
-                * If the tail page is still the same as what we think
-                * it is, then it is up to us to update the tail
-                * pointer.
-                */
-               if (tail_page == cpu_buffer->tail_page) {
-                       local_set(&next_page->write, 0);
-                       local_set(&next_page->page->commit, 0);
-                       cpu_buffer->tail_page = next_page;
+       /*
+        * If the tail page is still the same as what we think
+        * it is, then it is up to us to update the tail
+        * pointer.
+        */
+       if (tail_page == cpu_buffer->tail_page) {
+               local_set(&next_page->write, 0);
+               local_set(&next_page->entries, 0);
+               local_set(&next_page->page->commit, 0);
+               cpu_buffer->tail_page = next_page;
+
+               /* reread the time stamp */
+               *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
+               cpu_buffer->tail_page->page->time_stamp = *ts;
+       }
 
-                       /* reread the time stamp */
-                       *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
-                       cpu_buffer->tail_page->page->time_stamp = *ts;
-               }
+       /*
+        * The actual tail page has moved forward.
+        */
+       if (tail < BUF_PAGE_SIZE) {
+               /* Mark the rest of the page with padding */
+               event = __rb_page_index(tail_page, tail);
+               rb_event_set_padding(event);
+       }
 
-               /*
-                * The actual tail page has moved forward.
-                */
-               if (tail < BUF_PAGE_SIZE) {
-                       /* Mark the rest of the page with padding */
-                       event = __rb_page_index(tail_page, tail);
-                       rb_event_set_padding(event);
-               }
+       /* Set the write back to the previous setting */
+       local_sub(length, &tail_page->write);
 
-               if (tail <= BUF_PAGE_SIZE)
-                       /* Set the write back to the previous setting */
-                       local_set(&tail_page->write, tail);
+       /*
+        * If this was a commit entry that failed,
+        * advance the commit pointer too
+        */
+       if (tail_page == cpu_buffer->commit_page &&
+           tail == rb_commit_index(cpu_buffer)) {
+               rb_set_commit_to_write(cpu_buffer);
+       }
 
-               /*
-                * If this was a commit entry that failed,
-                * increment that too
-                */
-               if (tail_page == cpu_buffer->commit_page &&
-                   tail == rb_commit_index(cpu_buffer)) {
-                       rb_set_commit_to_write(cpu_buffer);
-               }
+       __raw_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
+
+       /* fail and let the caller try again */
+       return ERR_PTR(-EAGAIN);
+
+ out_reset:
+       /* reset write */
+       local_sub(length, &tail_page->write);
 
+       if (likely(lock_taken))
                __raw_spin_unlock(&cpu_buffer->lock);
-               local_irq_restore(flags);
+       local_irq_restore(flags);
+       return NULL;
+}
 
-               /* fail and let the caller try again */
-               return ERR_PTR(-EAGAIN);
-       }
+static struct ring_buffer_event *
+__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+                 unsigned type, unsigned long length, u64 *ts)
+{
+       struct buffer_page *tail_page, *commit_page;
+       struct ring_buffer_event *event;
+       unsigned long tail, write;
+
+       commit_page = cpu_buffer->commit_page;
+       /* we just need to protect against interrupts */
+       barrier();
+       tail_page = cpu_buffer->tail_page;
+       write = local_add_return(length, &tail_page->write);
+       tail = write - length;
+
+       /* See if we shot past the end of this buffer page */
+       if (write > BUF_PAGE_SIZE)
+               return rb_move_tail(cpu_buffer, length, tail,
+                                   commit_page, tail_page, ts);
 
        /* We reserved something on the buffer */
 
@@ -1313,6 +1316,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        event = __rb_page_index(tail_page, tail);
        rb_update_event(event, type, length);
 
+       /* The passed-in type is zero for DATA */
+       if (likely(!type))
+               local_inc(&tail_page->entries);
+
        /*
         * If this is a commit and the tail is zero, then update
         * this page's time stamp.
@@ -1321,16 +1328,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                cpu_buffer->commit_page->page->time_stamp = *ts;
 
        return event;
-
- out_reset:
-       /* reset write */
-       if (tail <= BUF_PAGE_SIZE)
-               local_set(&tail_page->write, tail);
-
-       if (likely(lock_taken))
-               __raw_spin_unlock(&cpu_buffer->lock);
-       local_irq_restore(flags);
-       return NULL;
 }
 
 static int
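
With the slow path split out into rb_move_tail(), the shape of the fast path in __rb_reserve_next() is easier to see: a writer claims space with one atomic add on the page-local write index, and falls into the slow path only when its claimed range crosses the end of the page. A userspace approximation of that pattern; the kernel uses the cheaper local_add_return(), which suffices because only interrupts on the same CPU can race, while this sketch substitutes a full C11 atomic:

#include <stdatomic.h>

#define BUF_PAGE_SIZE_EX 4080u		/* illustrative */

/*
 * Returns the offset at which the caller may write its event, or -1
 * when the claimed range crosses the end of the page and the tail
 * must be moved (the rb_move_tail() equivalent).
 */
static long reserve_fast_path(atomic_uint *write_index, unsigned int length)
{
	/* claim the range first; each writer owns a disjoint slice */
	unsigned int write = atomic_fetch_add(write_index, length) + length;
	unsigned int tail = write - length;

	if (write > BUF_PAGE_SIZE_EX)
		return -1;		/* slow path */

	return (long)tail;		/* event goes at this offset */
}
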
@@ -1395,13 +1392,14 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 
 static struct ring_buffer_event *
 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
-                     unsigned type, unsigned long length)
+                     unsigned long length)
 {
        struct ring_buffer_event *event;
        u64 ts, delta;
        int commit = 0;
        int nr_loops = 0;
 
+       length = rb_calculate_event_length(length);
  again:
        /*
         * We allow for interrupts to reenter here and do a trace.
@@ -1454,7 +1452,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
                /* Non commits have zero deltas */
                delta = 0;
 
-       event = __rb_reserve_next(cpu_buffer, type, length, &ts);
+       event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
        if (PTR_ERR(event) == -EAGAIN)
                goto again;
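
Folding rb_calculate_event_length() into rb_reserve_next_event() means every caller now passes the raw payload size, and the padding logic lives in one place. Roughly, that calculation rounds the payload up to the 4-byte event alignment, adds the 4-byte header word, and adds one more word when the size will not fit in type_len; a sketch under those assumptions (the 112-byte threshold and the _EX names are illustrative, not lifted from this tree):

#define RB_ALIGNMENT_EX		4u	/* events are 4-byte aligned */
#define RB_EVNT_HDR_SIZE_EX	4u	/* type_len + time-delta word */
#define RB_MAX_SMALL_DATA_EX	112u	/* largest size type_len can encode */

static unsigned int event_length_ex(unsigned int length)
{
	if (!length)			/* zero length can cause confusion */
		length = 1;

	if (length > RB_MAX_SMALL_DATA_EX)
		length += 4;		/* explicit 32-bit length word */

	length += RB_EVNT_HDR_SIZE_EX;

	/* round up to the event alignment */
	return (length + RB_ALIGNMENT_EX - 1) & ~(RB_ALIGNMENT_EX - 1);
}
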
 
@@ -1558,11 +1556,10 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (atomic_read(&cpu_buffer->record_disabled))
                goto out;
 
-       length = rb_calculate_event_length(length);
-       if (length > BUF_PAGE_SIZE)
+       if (length > BUF_MAX_DATA_SIZE)
                goto out;
 
-       event = rb_reserve_next_event(cpu_buffer, 0, length);
+       event = rb_reserve_next_event(cpu_buffer, length);
        if (!event)
                goto out;
 
@@ -1694,7 +1691,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
         * committed yet. Thus we can assume that preemption
         * is still disabled.
         */
-       RB_WARN_ON(buffer, !preempt_count());
+       RB_WARN_ON(buffer, preemptible());
 
        cpu = smp_processor_id();
        cpu_buffer = buffer->buffers[cpu];
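
The switch from !preempt_count() to preemptible() matters on non-preemptible kernels: preempt_disable() does not touch the count there, so preempt_count() can legitimately be zero on this path and the old check would warn spuriously. preemptible() is hardwired to 0 when CONFIG_PREEMPT is off, so the warning compiles away. For reference, the definitions this leans on, as I understand include/linux/preempt.h of this era (worth double-checking against the tree):

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#else
# define preemptible()	0
#endif
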
@@ -1764,7 +1761,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
-       unsigned long event_length;
        void *body;
        int ret = -EBUSY;
        int cpu, resched;
@@ -1787,8 +1783,10 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (atomic_read(&cpu_buffer->record_disabled))
                goto out;
 
-       event_length = rb_calculate_event_length(length);
-       event = rb_reserve_next_event(cpu_buffer, 0, event_length);
+       if (length > BUF_MAX_DATA_SIZE)
+               goto out;
+
+       event = rb_reserve_next_event(cpu_buffer, length);
        if (!event)
                goto out;
 
@@ -2183,6 +2181,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        cpu_buffer->reader_page->list.prev = reader->list.prev;
 
        local_set(&cpu_buffer->reader_page->write, 0);
+       local_set(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
 
        /* Make the reader page now replace the head */
@@ -2629,6 +2628,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
+       local_set(&cpu_buffer->head_page->entries, 0);
        local_set(&cpu_buffer->head_page->page->commit, 0);
 
        cpu_buffer->head_page->read = 0;
@@ -2638,6 +2638,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        local_set(&cpu_buffer->reader_page->write, 0);
+       local_set(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
        cpu_buffer->reader_page->read = 0;
 
@@ -2664,6 +2665,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return;
 
+       atomic_inc(&cpu_buffer->record_disabled);
+
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
        __raw_spin_lock(&cpu_buffer->lock);
@@ -2673,6 +2676,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        __raw_spin_unlock(&cpu_buffer->lock);
 
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+       atomic_dec(&cpu_buffer->record_disabled);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
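
Disabling recording around the reset closes a writer/reset race: writers check record_disabled before reserving (see ring_buffer_lock_reserve() above), so no new events land while the page pointers are being rewound; writers already past the check are covered by the locks taken inside. A userspace sketch of the handshake, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int record_disabled;	/* per-cpu in the real buffer */

static bool try_reserve(void)
{
	if (atomic_load(&record_disabled))
		return false;		/* refuse to write during a reset */
	/* ... reserve and commit an event ... */
	return true;
}

static void reset_buffer(void)
{
	atomic_fetch_add(&record_disabled, 1);	/* stop new writers */
	/* ... take locks, rewind head/tail/reader pages, drop locks ... */
	atomic_fetch_sub(&record_disabled, 1);	/* re-enable tracing */
}

int main(void)
{
	reset_buffer();
	printf("reserve ok: %d\n", try_reserve());	/* prints 1 */
	return 0;
}
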
 
@@ -2799,28 +2804,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
-static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-                             struct buffer_data_page *bpage,
-                             unsigned int offset)
-{
-       struct ring_buffer_event *event;
-       unsigned long head;
-
-       __raw_spin_lock(&cpu_buffer->lock);
-       for (head = offset; head < local_read(&bpage->commit);
-            head += rb_event_length(event)) {
-
-               event = __rb_data_page_index(bpage, head);
-               if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
-                       return;
-               /* Only count data entries */
-               if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
-                       continue;
-               cpu_buffer->read++;
-       }
-       __raw_spin_unlock(&cpu_buffer->lock);
-}
-
 /**
  * ring_buffer_alloc_read_page - allocate a page to read from buffer
  * @buffer: the buffer to allocate for.
@@ -2991,16 +2974,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                /* we copied everything to the beginning */
                read = 0;
        } else {
+               /* update the entry counter */
+               cpu_buffer->read += local_read(&reader->entries);
+
                /* swap the pages */
                rb_init_page(bpage);
                bpage = reader->page;
                reader->page = *data_page;
                local_set(&reader->write, 0);
+               local_set(&reader->entries, 0);
                reader->read = 0;
                *data_page = bpage;
-
-               /* update the entry counter */
-               rb_remove_entries(cpu_buffer, bpage, read);
        }
        ret = read;