ftrace: Rename set_bootup_ftrace into set_cmdline_ftrace
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c8d2a66..d4ff019 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-#include "trace.h"
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT           4U
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -467,14 +465,19 @@ struct ring_buffer_iter {
 };
 
 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
-#define RB_WARN_ON(buffer, cond)                               \
-       ({                                                      \
-               int _____ret = unlikely(cond);                  \
-               if (_____ret) {                                 \
-                       atomic_inc(&buffer->record_disabled);   \
-                       WARN_ON(1);                             \
-               }                                               \
-               _____ret;                                       \
+#define RB_WARN_ON(b, cond)                                            \
+       ({                                                              \
+               int _____ret = unlikely(cond);                          \
+               if (_____ret) {                                         \
+                       if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
+                               struct ring_buffer_per_cpu *__b =       \
+                                       (void *)b;                      \
+                               atomic_inc(&__b->buffer->record_disabled); \
+                       } else                                          \
+                               atomic_inc(&b->record_disabled);        \
+                       WARN_ON(1);                                     \
+               }                                                       \
+               _____ret;                                               \
        })
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
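
RB_WARN_ON can now be handed either a struct ring_buffer pointer or a struct ring_buffer_per_cpu pointer: __same_type() (a wrapper around GCC's __builtin_types_compatible_p()) decides at compile time which record_disabled counter to bump, and the branch that does not apply is provably dead. A minimal standalone sketch of the same dispatch trick, using illustrative struct names rather than the kernel's:

#include <stdio.h>

#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

struct outer   { int record_disabled; };
struct per_cpu { struct outer *buffer; int record_disabled; };

/*
 * Both branches must still type-check for both argument types (which is
 * why both structs carry a record_disabled field), but only the branch
 * picked by __same_type() is ever executed.
 */
#define DISABLE(b)                                              \
	do {                                                    \
		if (__same_type(*(b), struct per_cpu)) {        \
			struct per_cpu *__b = (void *)(b);      \
			__b->buffer->record_disabled++;         \
		} else                                          \
			(b)->record_disabled++;                 \
	} while (0)

int main(void)
{
	struct outer o = { 0 };
	struct per_cpu p = { &o, 0 };

	DISABLE(&o);				/* bumps o directly      */
	DISABLE(&p);				/* bumps o via p.buffer  */
	printf("%d\n", o.record_disabled);	/* prints 2 */
	return 0;
}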
@@ -696,8 +699,8 @@ static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 
        val &= ~RB_FLAG_MASK;
 
-       ret = (unsigned long)cmpxchg(&list->next,
-                                    val | old_flag, val | new_flag);
+       ret = cmpxchg((unsigned long *)&list->next,
+                     val | old_flag, val | new_flag);
 
        /* check if the reader took the page */
        if ((ret & ~RB_FLAG_MASK) != val)
@@ -789,7 +792,7 @@ static int rb_head_page_replace(struct buffer_page *old,
        val = *ptr & ~RB_FLAG_MASK;
        val |= RB_PAGE_HEAD;
 
-       ret = cmpxchg(ptr, val, &new->list);
+       ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 
        return ret == val;
 }
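
Both cmpxchg() fixes above are about operand types: list->next does double duty as a pointer whose two low bits carry the RB_PAGE_* head-page flags (buffer pages are at least 4-byte aligned), so the compare-and-swap must operate on the whole word as an unsigned long rather than on a pointer value. A rough userspace sketch of that low-bit tagging pattern, using GCC's __sync builtin in place of the kernel's cmpxchg() and made-up names throughout:

#include <stdio.h>

#define FLAG_MASK	3UL	/* two low bits of a 4-byte-aligned pointer */
#define PAGE_HEAD	1UL

struct node { struct node *next; };

/* Flip old_flag to new_flag in n->next; fails if another CPU raced us. */
static int set_flag(struct node *n, unsigned long old_flag,
		    unsigned long new_flag)
{
	unsigned long val = (unsigned long)n->next & ~FLAG_MASK;
	unsigned long ret;

	ret = __sync_val_compare_and_swap((unsigned long *)&n->next,
					  val | old_flag, val | new_flag);

	return (ret & ~FLAG_MASK) == val;	/* pointer part unchanged? */
}

int main(void)
{
	struct node a = { NULL };
	struct node b = { &a };		/* b.next -> a, flag bits clear */

	set_flag(&b, 0, PAGE_HEAD);
	printf("flags=%lu\n", (unsigned long)b.next & FLAG_MASK);  /* 1 */
	return 0;
}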
@@ -2068,7 +2071,8 @@ static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
+rb_reserve_next_event(struct ring_buffer *buffer,
+                     struct ring_buffer_per_cpu *cpu_buffer,
                      unsigned long length)
 {
        struct ring_buffer_event *event;
@@ -2078,6 +2082,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
        rb_start_commit(cpu_buffer);
 
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+       /*
+        * Due to the ability to swap a cpu buffer from a buffer
+        * it is possible it was swapped before we committed.
+        * (committing stops a swap). We check for it here and
+        * if it happened, we have to fail the write.
+        */
+       barrier();
+       if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+               local_dec(&cpu_buffer->committing);
+               local_dec(&cpu_buffer->commits);
+               return NULL;
+       }
+#endif
+
        length = rb_calculate_event_length(length);
  again:
        /*
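
Passing the top-level buffer into rb_reserve_next_event() lets a writer notice that its per-cpu buffer was swapped away between looking it up and starting the commit: bumping the committing counter forbids further swaps, so one re-read of cpu_buffer->buffer after the increment is enough, and a mismatch means the write has to be abandoned. A simplified userspace sketch of that pin-then-revalidate handshake, written with C11 atomics instead of the kernel's local_t and barrier(), all names illustrative:

#include <stdatomic.h>
#include <stdbool.h>

struct owner;				/* stand-in for struct ring_buffer */

struct cpu_buf {
	_Atomic(struct owner *)	owner;		/* which buffer owns us      */
	atomic_int		committing;	/* non-zero forbids swapping */
};

/* Writer side: pin the buffer, then re-check that it is still ours. */
bool try_reserve(struct cpu_buf *cb, struct owner *expected)
{
	atomic_fetch_add(&cb->committing, 1);

	if (atomic_load(&cb->owner) != expected) {
		/* Swapped before we pinned it: back out and fail the write. */
		atomic_fetch_sub(&cb->committing, 1);
		return false;
	}
	return true;	/* caller writes, then drops the pin */
}

/* Swapper side: refuse to move a buffer that has a commit in flight. */
bool try_swap(struct cpu_buf *cb, struct owner *from, struct owner *to)
{
	if (atomic_load(&cb->committing) != 0)
		return false;
	return atomic_compare_exchange_strong(&cb->owner, &from, to);
}

As in the patch above, the re-check only catches a swap that completed before the pin was taken; anything stronger is left to how and where the swap path itself is invoked.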
@@ -2238,7 +2257,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (length > BUF_MAX_DATA_SIZE)
                goto out;
 
-       event = rb_reserve_next_event(cpu_buffer, length);
+       event = rb_reserve_next_event(buffer, cpu_buffer, length);
        if (!event)
                goto out;
 
@@ -2471,7 +2490,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (length > BUF_MAX_DATA_SIZE)
                goto out;
 
-       event = rb_reserve_next_event(cpu_buffer, length);
+       event = rb_reserve_next_event(buffer, cpu_buffer, length);
        if (!event)
                goto out;
 
@@ -2976,15 +2995,12 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 }
 
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        int nr_loops = 0;
 
-       cpu_buffer = buffer->buffers[cpu];
-
  again:
        /*
         * We repeat when a timestamp is encountered. It is possible
@@ -3028,7 +3044,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = cpu_buffer->read_stamp + event->time_delta;
-                       ring_buffer_normalize_time_stamp(buffer,
+                       ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
                                                         cpu_buffer->cpu, ts);
                }
                return event;
@@ -3147,7 +3163,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        local_irq_save(flags);
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
-       event = rb_buffer_peek(buffer, cpu, ts);
+       event = rb_buffer_peek(cpu_buffer, ts);
        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                rb_advance_reader(cpu_buffer);
        if (dolock)
@@ -3216,7 +3232,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
 
-       event = rb_buffer_peek(buffer, cpu, ts);
+       event = rb_buffer_peek(cpu_buffer, ts);
        if (event)
                rb_advance_reader(cpu_buffer);
 
@@ -3479,6 +3495,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
  * @buffer_a: One buffer to swap with
@@ -3554,6 +3571,7 @@ out:
        return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
 
 /**
  * ring_buffer_alloc_read_page - allocate a page to read from buffer