ring-buffer: make the buffer a true circular link list
[safe/jmp/linux-2.6] / kernel / trace / ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/kmemcheck.h>
14 #include <linux/module.h>
15 #include <linux/percpu.h>
16 #include <linux/mutex.h>
17 #include <linux/init.h>
18 #include <linux/hash.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/fs.h>
22
23 #include "trace.h"
24
25 /*
26  * The ring buffer header is special. We must manually keep it up to date.
27  */
28 int ring_buffer_print_entry_header(struct trace_seq *s)
29 {
30         int ret;
31
32         ret = trace_seq_printf(s, "# compressed entry header\n");
33         ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
34         ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
35         ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
36         ret = trace_seq_printf(s, "\n");
37         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
38                                RINGBUF_TYPE_PADDING);
39         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40                                RINGBUF_TYPE_TIME_EXTEND);
41         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
42                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
43
44         return ret;
45 }
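
/*
 * Editorial worked example (not part of the original file): with the
 * layout printed above, a data event whose payload fits in
 * RB_MAX_SMALL_DATA (28 * RB_ALIGNMENT = 112) bytes encodes its size
 * directly in the 5 bit type_len field, in 4 byte units, so the whole
 * header is a single 32 bit word.  A larger payload sets type_len to 0
 * and stores its byte count in the first 32 bit array word instead.
 */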
46
47 /*
48  * The ring buffer is made up of a list of pages. A separate list of pages is
49  * allocated for each CPU. A writer may only write to a buffer that is
50  * associated with the CPU it is currently executing on.  A reader may read
51  * from any per cpu buffer.
52  *
53  * The reader is special. For each per cpu buffer, the reader has its own
54  * reader page. When a reader has read the entire reader page, this reader
55  * page is swapped with another page in the ring buffer.
56  *
57  * Now, as long as the writer is off the reader page, the reader can do
58  * whatever it wants with that page. The writer will never write to that page
59  * again (as long as it is out of the ring buffer).
60  *
61  * Here's some silly ASCII art.
62  *
63  *   +------+
64  *   |reader|          RING BUFFER
65  *   |page  |
66  *   +------+        +---+   +---+   +---+
67  *                   |   |-->|   |-->|   |
68  *                   +---+   +---+   +---+
69  *                     ^               |
70  *                     |               |
71  *                     +---------------+
72  *
73  *
74  *   +------+
75  *   |reader|          RING BUFFER
76  *   |page  |------------------v
77  *   +------+        +---+   +---+   +---+
78  *                   |   |-->|   |-->|   |
79  *                   +---+   +---+   +---+
80  *                     ^               |
81  *                     |               |
82  *                     +---------------+
83  *
84  *
85  *   +------+
86  *   |reader|          RING BUFFER
87  *   |page  |------------------v
88  *   +------+        +---+   +---+   +---+
89  *      ^            |   |-->|   |-->|   |
90  *      |            +---+   +---+   +---+
91  *      |                              |
92  *      |                              |
93  *      +------------------------------+
94  *
95  *
96  *   +------+
97  *   |buffer|          RING BUFFER
98  *   |page  |------------------v
99  *   +------+        +---+   +---+   +---+
100  *      ^            |   |   |   |-->|   |
101  *      |   New      +---+   +---+   +---+
102  *      |  Reader------^               |
103  *      |   page                       |
104  *      +------------------------------+
105  *
106  *
107  * After we make this swap, the reader can hand this page off to the splice
108  * code and be done with it. It can even allocate a new page if it needs to
109  * and swap that into the ring buffer.
110  *
111  * We will be using cmpxchg soon to make all this lockless.
112  *
113  */
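
/*
 * Editorial sketch (not part of the original file): the reader page swap
 * pictured above, reduced to bare pointers.  "struct demo_page" and
 * "demo_swap_reader" are hypothetical names used only for illustration;
 * the real code below works on struct buffer_page and struct list_head.
 */
#if 0	/* illustration only, never compiled */
struct demo_page {
	struct demo_page *next;
	struct demo_page *prev;
	char data[4096];
};

/* Exchange the private reader page with the current head of the ring. */
static struct demo_page *demo_swap_reader(struct demo_page **head,
					  struct demo_page *reader)
{
	struct demo_page *old_head = *head;

	/* Splice the spare reader page into the ring where the head was. */
	reader->prev = old_head->prev;
	reader->next = old_head->next;
	reader->prev->next = reader;
	reader->next->prev = reader;

	/* The old head is now outside the ring and belongs to the reader. */
	*head = reader->next;
	old_head->next = old_head->prev = old_head;

	return old_head;
}
#endif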
114
115 /*
116  * A fast way to enable or disable all ring buffers is to
117  * call tracing_on or tracing_off. Turning off the ring buffers
118  * prevents all ring buffers from being recorded to.
119  * Turning this switch on makes it OK to write to the
120  * ring buffer, if the ring buffer itself is enabled.
121  *
122  * There are three layers that must be on in order to write
123  * to the ring buffer.
124  *
125  * 1) This global flag must be set.
126  * 2) The ring buffer must be enabled for recording.
127  * 3) The per cpu buffer must be enabled for recording.
128  *
129  * In case of an anomaly, this global flag has a bit set that
130  * will permanently disable all ring buffers.
131  */
132
133 /*
134  * Global flag to disable all recording to ring buffers
135  *  This has two bits: ON, DISABLED
136  *
137  *  ON   DISABLED
138  * ---- ----------
139  *   0      0        : ring buffers are off
140  *   1      0        : ring buffers are on
141  *   X      1        : ring buffers are permanently disabled
142  */
143
144 enum {
145         RB_BUFFERS_ON_BIT       = 0,
146         RB_BUFFERS_DISABLED_BIT = 1,
147 };
148
149 enum {
150         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
151         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
152 };
153
154 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
155
156 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
158 /**
159  * tracing_on - enable all tracing buffers
160  *
161  * This function enables all tracing buffers that may have been
162  * disabled with tracing_off.
163  */
164 void tracing_on(void)
165 {
166         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
167 }
168 EXPORT_SYMBOL_GPL(tracing_on);
169
170 /**
171  * tracing_off - turn off all tracing buffers
172  *
173  * This function stops all tracing buffers from recording data.
174  * It does not disable any overhead the tracers themselves may
175  * be causing. This function simply causes all recording to
176  * the ring buffers to fail.
177  */
178 void tracing_off(void)
179 {
180         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181 }
182 EXPORT_SYMBOL_GPL(tracing_off);
183
184 /**
185  * tracing_off_permanent - permanently disable ring buffers
186  *
187  * This function, once called, will disable all ring buffers
188  * permanently.
189  */
190 void tracing_off_permanent(void)
191 {
192         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
193 }
194
195 /**
196  * tracing_is_on - show whether the ring buffers are enabled
197  */
198 int tracing_is_on(void)
199 {
200         return ring_buffer_flags == RB_BUFFERS_ON;
201 }
202 EXPORT_SYMBOL_GPL(tracing_is_on);
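
/*
 * Editorial sketch (not part of the original file): typical use of the
 * global switch above to freeze the buffers once an interesting
 * condition has been captured.  demo_condition_hit() is a hypothetical
 * predicate used only for illustration.
 */
#if 0	/* illustration only, never compiled */
static void demo_check(void)
{
	if (tracing_is_on() && demo_condition_hit()) {
		/* Stop all recording so the captured trace is preserved. */
		tracing_off();
	}
}
#endif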
203
206 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
207 #define RB_ALIGNMENT            4U
208 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
209 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
210
211 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
212 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
213
214 enum {
215         RB_LEN_TIME_EXTEND = 8,
216         RB_LEN_TIME_STAMP = 16,
217 };
218
219 static inline int rb_null_event(struct ring_buffer_event *event)
220 {
221         return event->type_len == RINGBUF_TYPE_PADDING
222                         && event->time_delta == 0;
223 }
224
225 static inline int rb_discarded_event(struct ring_buffer_event *event)
226 {
227         return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
228 }
229
230 static void rb_event_set_padding(struct ring_buffer_event *event)
231 {
232         event->type_len = RINGBUF_TYPE_PADDING;
233         event->time_delta = 0;
234 }
235
236 static unsigned
237 rb_event_data_length(struct ring_buffer_event *event)
238 {
239         unsigned length;
240
241         if (event->type_len)
242                 length = event->type_len * RB_ALIGNMENT;
243         else
244                 length = event->array[0];
245         return length + RB_EVNT_HDR_SIZE;
246 }
247
248 /* inline for ring buffer fast paths */
249 static unsigned
250 rb_event_length(struct ring_buffer_event *event)
251 {
252         switch (event->type_len) {
253         case RINGBUF_TYPE_PADDING:
254                 if (rb_null_event(event))
255                         /* undefined */
256                         return -1;
257                 return  event->array[0] + RB_EVNT_HDR_SIZE;
258
259         case RINGBUF_TYPE_TIME_EXTEND:
260                 return RB_LEN_TIME_EXTEND;
261
262         case RINGBUF_TYPE_TIME_STAMP:
263                 return RB_LEN_TIME_STAMP;
264
265         case RINGBUF_TYPE_DATA:
266                 return rb_event_data_length(event);
267         default:
268                 BUG();
269         }
270         /* not hit */
271         return 0;
272 }
273
274 /**
275  * ring_buffer_event_length - return the length of the event
276  * @event: the event to get the length of
277  */
278 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
279 {
280         unsigned length = rb_event_length(event);
281         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
282                 return length;
283         length -= RB_EVNT_HDR_SIZE;
284         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
285                 length -= sizeof(event->array[0]);
286         return length;
287 }
288 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
289
290 /* inline for ring buffer fast paths */
291 static void *
292 rb_event_data(struct ring_buffer_event *event)
293 {
294         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
295         /* If length is in len field, then array[0] has the data */
296         if (event->type_len)
297                 return (void *)&event->array[0];
298         /* Otherwise length is in array[0] and array[1] has the data */
299         return (void *)&event->array[1];
300 }
301
302 /**
303  * ring_buffer_event_data - return the data of the event
304  * @event: the event to get the data from
305  */
306 void *ring_buffer_event_data(struct ring_buffer_event *event)
307 {
308         return rb_event_data(event);
309 }
310 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
311
312 #define for_each_buffer_cpu(buffer, cpu)                \
313         for_each_cpu(cpu, buffer->cpumask)
314
315 #define TS_SHIFT        27
316 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
317 #define TS_DELTA_TEST   (~TS_MASK)
318
319 struct buffer_data_page {
320         u64              time_stamp;    /* page time stamp */
321         local_t          commit;        /* write committed index */
322         unsigned char    data[];        /* data of buffer page */
323 };
324
325 struct buffer_page {
326         struct list_head list;          /* list of buffer pages */
327         local_t          write;         /* index for next write */
328         unsigned         read;          /* index for next read */
329         local_t          entries;       /* entries on this page */
330         struct buffer_data_page *page;  /* Actual data page */
331 };
332
333 static void rb_init_page(struct buffer_data_page *bpage)
334 {
335         local_set(&bpage->commit, 0);
336 }
337
338 /**
339  * ring_buffer_page_len - the size of data on the page.
340  * @page: The page to read
341  *
342  * Returns the amount of data on the page, including buffer page header.
343  */
344 size_t ring_buffer_page_len(void *page)
345 {
346         return local_read(&((struct buffer_data_page *)page)->commit)
347                 + BUF_PAGE_HDR_SIZE;
348 }
349
350 /*
351  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
352  * this issue out.
353  */
354 static void free_buffer_page(struct buffer_page *bpage)
355 {
356         free_page((unsigned long)bpage->page);
357         kfree(bpage);
358 }
359
360 /*
361  * We need to fit the time_stamp delta into 27 bits.
362  */
363 static inline int test_time_stamp(u64 delta)
364 {
365         if (delta & TS_DELTA_TEST)
366                 return 1;
367         return 0;
368 }
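
/*
 * Editorial note (not part of the original file): with the default
 * trace_clock_local() counting nanoseconds, a 27 bit delta covers
 * 2^27 ns, roughly 134 ms.  Two events on the same cpu that are further
 * apart than that force a TIME_EXTEND event to be inserted (see
 * rb_add_time_stamp() below).
 */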
369
370 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
371
372 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
373 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
374
375 /* Max number of timestamps that can fit on a page */
376 #define RB_TIMESTAMPS_PER_PAGE  (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
377
378 int ring_buffer_print_page_header(struct trace_seq *s)
379 {
380         struct buffer_data_page field;
381         int ret;
382
383         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
384                                "offset:0;\tsize:%u;\n",
385                                (unsigned int)sizeof(field.time_stamp));
386
387         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
388                                "offset:%u;\tsize:%u;\n",
389                                (unsigned int)offsetof(typeof(field), commit),
390                                (unsigned int)sizeof(field.commit));
391
392         ret = trace_seq_printf(s, "\tfield: char data;\t"
393                                "offset:%u;\tsize:%u;\n",
394                                (unsigned int)offsetof(typeof(field), data),
395                                (unsigned int)BUF_PAGE_SIZE);
396
397         return ret;
398 }
399
400 /*
401  * If head_page == tail_page && head == tail, then the buffer is empty.
402  */
403 struct ring_buffer_per_cpu {
404         int                             cpu;
405         struct ring_buffer              *buffer;
406         spinlock_t                      reader_lock; /* serialize readers */
407         raw_spinlock_t                  lock;
408         struct lock_class_key           lock_key;
409         struct list_head                *pages;
410         struct buffer_page              *head_page;     /* read from head */
411         struct buffer_page              *tail_page;     /* write to tail */
412         struct buffer_page              *commit_page;   /* committed pages */
413         struct buffer_page              *reader_page;
414         unsigned long                   nmi_dropped;
415         unsigned long                   commit_overrun;
416         unsigned long                   overrun;
417         unsigned long                   read;
418         local_t                         entries;
419         local_t                         committing;
420         local_t                         commits;
421         u64                             write_stamp;
422         u64                             read_stamp;
423         atomic_t                        record_disabled;
424 };
425
426 struct ring_buffer {
427         unsigned                        pages;
428         unsigned                        flags;
429         int                             cpus;
430         atomic_t                        record_disabled;
431         cpumask_var_t                   cpumask;
432
433         struct lock_class_key           *reader_lock_key;
434
435         struct mutex                    mutex;
436
437         struct ring_buffer_per_cpu      **buffers;
438
439 #ifdef CONFIG_HOTPLUG_CPU
440         struct notifier_block           cpu_notify;
441 #endif
442         u64                             (*clock)(void);
443 };
444
445 struct ring_buffer_iter {
446         struct ring_buffer_per_cpu      *cpu_buffer;
447         unsigned long                   head;
448         struct buffer_page              *head_page;
449         u64                             read_stamp;
450 };
451
452 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
453 #define RB_WARN_ON(buffer, cond)                                \
454         ({                                                      \
455                 int _____ret = unlikely(cond);                  \
456                 if (_____ret) {                                 \
457                         atomic_inc(&buffer->record_disabled);   \
458                         WARN_ON(1);                             \
459                 }                                               \
460                 _____ret;                                       \
461         })
462
463 /* Up this if you want to test the TIME_EXTENTS and normalization */
464 #define DEBUG_SHIFT 0
465
466 static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
467 {
468         /* shift to debug/test normalization and TIME_EXTENTS */
469         return buffer->clock() << DEBUG_SHIFT;
470 }
471
472 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
473 {
474         u64 time;
475
476         preempt_disable_notrace();
477         time = rb_time_stamp(buffer, cpu);
478         preempt_enable_no_resched_notrace();
479
480         return time;
481 }
482 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
483
484 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
485                                       int cpu, u64 *ts)
486 {
487         /* Just stupid testing the normalize function and deltas */
488         *ts >>= DEBUG_SHIFT;
489 }
490 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
491
492 /**
493  * rb_check_pages - integrity check of buffer pages
494  * @cpu_buffer: CPU buffer with pages to test
495  *
496  * As a safety measure we check to make sure the data pages have not
497  * been corrupted.
498  */
499 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
500 {
501         struct list_head *head = cpu_buffer->pages;
502         struct buffer_page *bpage, *tmp;
503
504         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
505                 return -1;
506         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
507                 return -1;
508
509         list_for_each_entry_safe(bpage, tmp, head, list) {
510                 if (RB_WARN_ON(cpu_buffer,
511                                bpage->list.next->prev != &bpage->list))
512                         return -1;
513                 if (RB_WARN_ON(cpu_buffer,
514                                bpage->list.prev->next != &bpage->list))
515                         return -1;
516         }
517
518         return 0;
519 }
520
521 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
522                              unsigned nr_pages)
523 {
524         struct buffer_page *bpage, *tmp;
525         unsigned long addr;
526         LIST_HEAD(pages);
527         unsigned i;
528
529         WARN_ON(!nr_pages);
530
531         for (i = 0; i < nr_pages; i++) {
532                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
533                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
534                 if (!bpage)
535                         goto free_pages;
536                 list_add(&bpage->list, &pages);
537
538                 addr = __get_free_page(GFP_KERNEL);
539                 if (!addr)
540                         goto free_pages;
541                 bpage->page = (void *)addr;
542                 rb_init_page(bpage->page);
543         }
544
545         /*
546          * The ring buffer page list is a circular list that does not
547          * start and end with a list head. All page list items point to
548          * other pages.
549          */
550         cpu_buffer->pages = pages.next;
551         list_del(&pages);
552
553         rb_check_pages(cpu_buffer);
554
555         return 0;
556
557  free_pages:
558         list_for_each_entry_safe(bpage, tmp, &pages, list) {
559                 list_del_init(&bpage->list);
560                 free_buffer_page(bpage);
561         }
562         return -ENOMEM;
563 }
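
/*
 * Editorial sketch (not part of the original file): because the page
 * list built above is circular and has no sentinel head, walking it
 * means starting anywhere and stopping when the starting point comes
 * around again.  demo_for_each_page() is a hypothetical helper shown
 * only to illustrate the shape of the list.
 */
#if 0	/* illustration only, never compiled */
static void demo_for_each_page(struct list_head *pages)
{
	struct list_head *p = pages;

	do {
		struct buffer_page *bpage =
			list_entry(p, struct buffer_page, list);
		/* ... inspect bpage ... */
		p = p->next;
	} while (p != pages);
}
#endif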
564
565 static struct ring_buffer_per_cpu *
566 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
567 {
568         struct ring_buffer_per_cpu *cpu_buffer;
569         struct buffer_page *bpage;
570         unsigned long addr;
571         int ret;
572
573         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
574                                   GFP_KERNEL, cpu_to_node(cpu));
575         if (!cpu_buffer)
576                 return NULL;
577
578         cpu_buffer->cpu = cpu;
579         cpu_buffer->buffer = buffer;
580         spin_lock_init(&cpu_buffer->reader_lock);
581         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
582         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
583
584         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
585                             GFP_KERNEL, cpu_to_node(cpu));
586         if (!bpage)
587                 goto fail_free_buffer;
588
589         cpu_buffer->reader_page = bpage;
590         addr = __get_free_page(GFP_KERNEL);
591         if (!addr)
592                 goto fail_free_reader;
593         bpage->page = (void *)addr;
594         rb_init_page(bpage->page);
595
596         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
597
598         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
599         if (ret < 0)
600                 goto fail_free_reader;
601
602         cpu_buffer->head_page
603                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
604         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
605
606         return cpu_buffer;
607
608  fail_free_reader:
609         free_buffer_page(cpu_buffer->reader_page);
610
611  fail_free_buffer:
612         kfree(cpu_buffer);
613         return NULL;
614 }
615
616 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
617 {
618         struct list_head *head = cpu_buffer->pages;
619         struct buffer_page *bpage, *tmp;
620
621         free_buffer_page(cpu_buffer->reader_page);
622
623         if (head) {
624                 list_for_each_entry_safe(bpage, tmp, head, list) {
625                         list_del_init(&bpage->list);
626                         free_buffer_page(bpage);
627                 }
628                 bpage = list_entry(head, struct buffer_page, list);
629                 free_buffer_page(bpage);
630         }
631
632         kfree(cpu_buffer);
633 }
634
635 #ifdef CONFIG_HOTPLUG_CPU
636 static int rb_cpu_notify(struct notifier_block *self,
637                          unsigned long action, void *hcpu);
638 #endif
639
640 /**
641  * ring_buffer_alloc - allocate a new ring_buffer
642  * @size: the size in bytes per cpu that is needed.
643  * @flags: attributes to set for the ring buffer.
644  *
645  * Currently the only flag that is available is the RB_FL_OVERWRITE
646  * flag. This flag means that the buffer will overwrite old data
647  * when the buffer wraps. If this flag is not set, the buffer will
648  * drop data when the tail hits the head.
649  */
650 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
651                                         struct lock_class_key *key)
652 {
653         struct ring_buffer *buffer;
654         int bsize;
655         int cpu;
656
657         /* keep it in its own cache line */
658         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
659                          GFP_KERNEL);
660         if (!buffer)
661                 return NULL;
662
663         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
664                 goto fail_free_buffer;
665
666         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
667         buffer->flags = flags;
668         buffer->clock = trace_clock_local;
669         buffer->reader_lock_key = key;
670
671         /* need at least two pages */
672         if (buffer->pages < 2)
673                 buffer->pages = 2;
674
675         /*
676  * If CPU hotplug is not configured and the ring buffer is allocated
677  * from an early initcall, it will not be notified of secondary cpus.
678  * In that case, we need to allocate for all possible cpus.
679          */
680 #ifdef CONFIG_HOTPLUG_CPU
681         get_online_cpus();
682         cpumask_copy(buffer->cpumask, cpu_online_mask);
683 #else
684         cpumask_copy(buffer->cpumask, cpu_possible_mask);
685 #endif
686         buffer->cpus = nr_cpu_ids;
687
688         bsize = sizeof(void *) * nr_cpu_ids;
689         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
690                                   GFP_KERNEL);
691         if (!buffer->buffers)
692                 goto fail_free_cpumask;
693
694         for_each_buffer_cpu(buffer, cpu) {
695                 buffer->buffers[cpu] =
696                         rb_allocate_cpu_buffer(buffer, cpu);
697                 if (!buffer->buffers[cpu])
698                         goto fail_free_buffers;
699         }
700
701 #ifdef CONFIG_HOTPLUG_CPU
702         buffer->cpu_notify.notifier_call = rb_cpu_notify;
703         buffer->cpu_notify.priority = 0;
704         register_cpu_notifier(&buffer->cpu_notify);
705 #endif
706
707         put_online_cpus();
708         mutex_init(&buffer->mutex);
709
710         return buffer;
711
712  fail_free_buffers:
713         for_each_buffer_cpu(buffer, cpu) {
714                 if (buffer->buffers[cpu])
715                         rb_free_cpu_buffer(buffer->buffers[cpu]);
716         }
717         kfree(buffer->buffers);
718
719  fail_free_cpumask:
720         free_cpumask_var(buffer->cpumask);
721         put_online_cpus();
722
723  fail_free_buffer:
724         kfree(buffer);
725         return NULL;
726 }
727 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
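
/*
 * Editorial sketch (not part of the original file): allocating and
 * freeing a buffer through the public wrappers.  Callers normally use
 * the ring_buffer_alloc() macro from linux/ring_buffer.h, which supplies
 * the lock class key for the function above.  demo_buffer, demo_setup
 * and demo_teardown are hypothetical names.
 */
#if 0	/* illustration only, never compiled */
static struct ring_buffer *demo_buffer;

static int demo_setup(void)
{
	/* One megabyte per cpu; overwrite the oldest data when full. */
	demo_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	return demo_buffer ? 0 : -ENOMEM;
}

static void demo_teardown(void)
{
	ring_buffer_free(demo_buffer);
}
#endif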
728
729 /**
730  * ring_buffer_free - free a ring buffer.
731  * @buffer: the buffer to free.
732  */
733 void
734 ring_buffer_free(struct ring_buffer *buffer)
735 {
736         int cpu;
737
738         get_online_cpus();
739
740 #ifdef CONFIG_HOTPLUG_CPU
741         unregister_cpu_notifier(&buffer->cpu_notify);
742 #endif
743
744         for_each_buffer_cpu(buffer, cpu)
745                 rb_free_cpu_buffer(buffer->buffers[cpu]);
746
747         put_online_cpus();
748
749         free_cpumask_var(buffer->cpumask);
750
751         kfree(buffer);
752 }
753 EXPORT_SYMBOL_GPL(ring_buffer_free);
754
755 void ring_buffer_set_clock(struct ring_buffer *buffer,
756                            u64 (*clock)(void))
757 {
758         buffer->clock = clock;
759 }
760
761 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
762
763 static void
764 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
765 {
766         struct buffer_page *bpage;
767         struct list_head *p;
768         unsigned i;
769
770         atomic_inc(&cpu_buffer->record_disabled);
771         synchronize_sched();
772
773         for (i = 0; i < nr_pages; i++) {
774                 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
775                         return;
776                 p = cpu_buffer->pages->next;
777                 bpage = list_entry(p, struct buffer_page, list);
778                 list_del_init(&bpage->list);
779                 free_buffer_page(bpage);
780         }
781         if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
782                 return;
783
784         rb_reset_cpu(cpu_buffer);
785
786         rb_check_pages(cpu_buffer);
787
788         atomic_dec(&cpu_buffer->record_disabled);
789
790 }
791
792 static void
793 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
794                 struct list_head *pages, unsigned nr_pages)
795 {
796         struct buffer_page *bpage;
797         struct list_head *p;
798         unsigned i;
799
800         atomic_inc(&cpu_buffer->record_disabled);
801         synchronize_sched();
802
803         for (i = 0; i < nr_pages; i++) {
804                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
805                         return;
806                 p = pages->next;
807                 bpage = list_entry(p, struct buffer_page, list);
808                 list_del_init(&bpage->list);
809                 list_add_tail(&bpage->list, cpu_buffer->pages);
810         }
811         rb_reset_cpu(cpu_buffer);
812
813         rb_check_pages(cpu_buffer);
814
815         atomic_dec(&cpu_buffer->record_disabled);
816 }
817
818 /**
819  * ring_buffer_resize - resize the ring buffer
820  * @buffer: the buffer to resize.
821  * @size: the new size.
822  *
823  * The tracer is responsible for making sure that the buffer is
824  * not being used while changing the size.
825  * Note: We may be able to change the above requirement by using
826  *  RCU synchronizations.
827  *
828  * Minimum size is 2 * BUF_PAGE_SIZE.
829  *
830  * Returns -1 on failure.
831  */
832 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
833 {
834         struct ring_buffer_per_cpu *cpu_buffer;
835         unsigned nr_pages, rm_pages, new_pages;
836         struct buffer_page *bpage, *tmp;
837         unsigned long buffer_size;
838         unsigned long addr;
839         LIST_HEAD(pages);
840         int i, cpu;
841
842         /*
843          * Always succeed at resizing a non-existent buffer:
844          */
845         if (!buffer)
846                 return size;
847
848         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
849         size *= BUF_PAGE_SIZE;
850         buffer_size = buffer->pages * BUF_PAGE_SIZE;
851
852         /* we need a minimum of two pages */
853         if (size < BUF_PAGE_SIZE * 2)
854                 size = BUF_PAGE_SIZE * 2;
855
856         if (size == buffer_size)
857                 return size;
858
859         mutex_lock(&buffer->mutex);
860         get_online_cpus();
861
862         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
863
864         if (size < buffer_size) {
865
866                 /* easy case, just free pages */
867                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
868                         goto out_fail;
869
870                 rm_pages = buffer->pages - nr_pages;
871
872                 for_each_buffer_cpu(buffer, cpu) {
873                         cpu_buffer = buffer->buffers[cpu];
874                         rb_remove_pages(cpu_buffer, rm_pages);
875                 }
876                 goto out;
877         }
878
879         /*
880          * This is a bit more difficult. We only want to add pages
881          * when we can allocate enough for all CPUs. We do this
882          * by allocating all the pages and storing them on a local
883  * linked list. If we succeed in our allocation, then we
884  * add these pages to the cpu_buffers. Otherwise we just free
885  * them all and return -ENOMEM.
886          */
887         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
888                 goto out_fail;
889
890         new_pages = nr_pages - buffer->pages;
891
892         for_each_buffer_cpu(buffer, cpu) {
893                 for (i = 0; i < new_pages; i++) {
894                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
895                                                   cache_line_size()),
896                                             GFP_KERNEL, cpu_to_node(cpu));
897                         if (!bpage)
898                                 goto free_pages;
899                         list_add(&bpage->list, &pages);
900                         addr = __get_free_page(GFP_KERNEL);
901                         if (!addr)
902                                 goto free_pages;
903                         bpage->page = (void *)addr;
904                         rb_init_page(bpage->page);
905                 }
906         }
907
908         for_each_buffer_cpu(buffer, cpu) {
909                 cpu_buffer = buffer->buffers[cpu];
910                 rb_insert_pages(cpu_buffer, &pages, new_pages);
911         }
912
913         if (RB_WARN_ON(buffer, !list_empty(&pages)))
914                 goto out_fail;
915
916  out:
917         buffer->pages = nr_pages;
918         put_online_cpus();
919         mutex_unlock(&buffer->mutex);
920
921         return size;
922
923  free_pages:
924         list_for_each_entry_safe(bpage, tmp, &pages, list) {
925                 list_del_init(&bpage->list);
926                 free_buffer_page(bpage);
927         }
928         put_online_cpus();
929         mutex_unlock(&buffer->mutex);
930         return -ENOMEM;
931
932         /*
933          * Something went totally wrong, and we are too paranoid
934          * to even clean up the mess.
935          */
936  out_fail:
937         put_online_cpus();
938         mutex_unlock(&buffer->mutex);
939         return -1;
940 }
941 EXPORT_SYMBOL_GPL(ring_buffer_resize);
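
/*
 * Editorial sketch (not part of the original file): growing a buffer
 * after allocation.  demo_buffer is the hypothetical buffer from the
 * earlier example; as the comment above ring_buffer_resize() requires,
 * the caller must make sure nothing is using the buffer meanwhile.
 */
#if 0	/* illustration only, never compiled */
static int demo_grow(void)
{
	/* Ask for 4 MB per cpu; the new size is returned on success. */
	if (ring_buffer_resize(demo_buffer, 4 << 20) < 0)
		return -ENOMEM;
	return 0;
}
#endif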
942
943 static inline void *
944 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
945 {
946         return bpage->data + index;
947 }
948
949 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
950 {
951         return bpage->page->data + index;
952 }
953
954 static inline struct ring_buffer_event *
955 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
956 {
957         return __rb_page_index(cpu_buffer->reader_page,
958                                cpu_buffer->reader_page->read);
959 }
960
961 static inline struct ring_buffer_event *
962 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
963 {
964         return __rb_page_index(cpu_buffer->head_page,
965                                cpu_buffer->head_page->read);
966 }
967
968 static inline struct ring_buffer_event *
969 rb_iter_head_event(struct ring_buffer_iter *iter)
970 {
971         return __rb_page_index(iter->head_page, iter->head);
972 }
973
974 static inline unsigned rb_page_write(struct buffer_page *bpage)
975 {
976         return local_read(&bpage->write);
977 }
978
979 static inline unsigned rb_page_commit(struct buffer_page *bpage)
980 {
981         return local_read(&bpage->page->commit);
982 }
983
984 /* Size is determined by what has been committed */
985 static inline unsigned rb_page_size(struct buffer_page *bpage)
986 {
987         return rb_page_commit(bpage);
988 }
989
990 static inline unsigned
991 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
992 {
993         return rb_page_commit(cpu_buffer->commit_page);
994 }
995
996 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
997 {
998         return rb_page_commit(cpu_buffer->head_page);
999 }
1000
1001 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1002                                struct buffer_page **bpage)
1003 {
1004         struct list_head *p = (*bpage)->list.next;
1005
1006         *bpage = list_entry(p, struct buffer_page, list);
1007 }
1008
1009 static inline unsigned
1010 rb_event_index(struct ring_buffer_event *event)
1011 {
1012         unsigned long addr = (unsigned long)event;
1013
1014         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1015 }
1016
1017 static inline int
1018 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1019                    struct ring_buffer_event *event)
1020 {
1021         unsigned long addr = (unsigned long)event;
1022         unsigned long index;
1023
1024         index = rb_event_index(event);
1025         addr &= PAGE_MASK;
1026
1027         return cpu_buffer->commit_page->page == (void *)addr &&
1028                 rb_commit_index(cpu_buffer) == index;
1029 }
1030
1031 static void
1032 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1033 {
1034         /*
1035          * We only race with interrupts and NMIs on this CPU.
1036          * If we own the commit event, then we can commit
1037          * all others that interrupted us, since the interruptions
1038          * nest like a stack (they finish before they come
1039          * back to us). This allows us to do a simple loop to
1040          * assign the commit to the tail.
1041          */
1042  again:
1043         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1044                 cpu_buffer->commit_page->page->commit =
1045                         cpu_buffer->commit_page->write;
1046                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1047                 cpu_buffer->write_stamp =
1048                         cpu_buffer->commit_page->page->time_stamp;
1049                 /* add barrier to keep gcc from optimizing too much */
1050                 barrier();
1051         }
1052         while (rb_commit_index(cpu_buffer) !=
1053                rb_page_write(cpu_buffer->commit_page)) {
1054                 cpu_buffer->commit_page->page->commit =
1055                         cpu_buffer->commit_page->write;
1056                 barrier();
1057         }
1058
1059         /* again, keep gcc from optimizing */
1060         barrier();
1061
1062         /*
1063          * If an interrupt came in just after the first while loop
1064          * and pushed the tail page forward, we will be left with
1065          * a dangling commit that will never go forward.
1066          */
1067         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1068                 goto again;
1069 }
1070
1071 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1072 {
1073         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1074         cpu_buffer->reader_page->read = 0;
1075 }
1076
1077 static void rb_inc_iter(struct ring_buffer_iter *iter)
1078 {
1079         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1080
1081         /*
1082          * The iterator could be on the reader page (it starts there).
1083          * But the head could have moved, since the reader was
1084          * found. Check for this case and assign the iterator
1085          * to the head page instead of next.
1086          */
1087         if (iter->head_page == cpu_buffer->reader_page)
1088                 iter->head_page = cpu_buffer->head_page;
1089         else
1090                 rb_inc_page(cpu_buffer, &iter->head_page);
1091
1092         iter->read_stamp = iter->head_page->page->time_stamp;
1093         iter->head = 0;
1094 }
1095
1096 /**
1097  * rb_update_event - update event type and data
1098  * @event: the event to update
1099  * @type: the type of event
1100  * @length: the size of the event field in the ring buffer
1101  *
1102  * Update the type and data fields of the event. The length
1103  * is the actual size that is written to the ring buffer,
1104  * and with this, we can determine what to place into the
1105  * data field.
1106  */
1107 static void
1108 rb_update_event(struct ring_buffer_event *event,
1109                          unsigned type, unsigned length)
1110 {
1111         event->type_len = type;
1112
1113         switch (type) {
1114
1115         case RINGBUF_TYPE_PADDING:
1116         case RINGBUF_TYPE_TIME_EXTEND:
1117         case RINGBUF_TYPE_TIME_STAMP:
1118                 break;
1119
1120         case 0:
1121                 length -= RB_EVNT_HDR_SIZE;
1122                 if (length > RB_MAX_SMALL_DATA)
1123                         event->array[0] = length;
1124                 else
1125                         event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1126                 break;
1127         default:
1128                 BUG();
1129         }
1130 }
1131
1132 static unsigned rb_calculate_event_length(unsigned length)
1133 {
1134         struct ring_buffer_event event; /* Used only for sizeof array */
1135
1136         /* zero length can cause confusions */
1137         if (!length)
1138                 length = 1;
1139
1140         if (length > RB_MAX_SMALL_DATA)
1141                 length += sizeof(event.array[0]);
1142
1143         length += RB_EVNT_HDR_SIZE;
1144         length = ALIGN(length, RB_ALIGNMENT);
1145
1146         return length;
1147 }
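
/*
 * Editorial worked example (not part of the original file): requesting
 * 10 bytes of payload adds the 4 byte header (14 bytes), which ALIGN()
 * rounds up to 16 bytes consumed on the page.  The aligned 12 byte
 * payload fits under RB_MAX_SMALL_DATA, so the event is later stored
 * with type_len = 12 / 4 = 3 and no separate length word.
 */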
1148
1149 static inline void
1150 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1151               struct buffer_page *tail_page,
1152               unsigned long tail, unsigned long length)
1153 {
1154         struct ring_buffer_event *event;
1155
1156         /*
1157          * Only the event that crossed the page boundary
1158          * must fill the old tail_page with padding.
1159          */
1160         if (tail >= BUF_PAGE_SIZE) {
1161                 local_sub(length, &tail_page->write);
1162                 return;
1163         }
1164
1165         event = __rb_page_index(tail_page, tail);
1166         kmemcheck_annotate_bitfield(event, bitfield);
1167
1168         /*
1169          * If this event is bigger than the minimum size, then
1170          * we need to be careful that we don't subtract the
1171          * write counter enough to allow another writer to slip
1172          * in on this page.
1173          * We put in a discarded commit instead, to make sure
1174          * that this space is not used again.
1175          *
1176          * If we are less than the minimum size, we don't need to
1177          * worry about it.
1178          */
1179         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1180                 /* No room for any events */
1181
1182                 /* Mark the rest of the page with padding */
1183                 rb_event_set_padding(event);
1184
1185                 /* Set the write back to the previous setting */
1186                 local_sub(length, &tail_page->write);
1187                 return;
1188         }
1189
1190         /* Put in a discarded event */
1191         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1192         event->type_len = RINGBUF_TYPE_PADDING;
1193         /* time delta must be non zero */
1194         event->time_delta = 1;
1195         /* Account for this as an entry */
1196         local_inc(&tail_page->entries);
1197         local_inc(&cpu_buffer->entries);
1198
1199         /* Set write to end of buffer */
1200         length = (tail + length) - BUF_PAGE_SIZE;
1201         local_sub(length, &tail_page->write);
1202 }
1203
1204 static struct ring_buffer_event *
1205 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1206              unsigned long length, unsigned long tail,
1207              struct buffer_page *commit_page,
1208              struct buffer_page *tail_page, u64 *ts)
1209 {
1210         struct buffer_page *next_page, *head_page, *reader_page;
1211         struct ring_buffer *buffer = cpu_buffer->buffer;
1212         bool lock_taken = false;
1213         unsigned long flags;
1214
1215         next_page = tail_page;
1216
1217         local_irq_save(flags);
1218         /*
1219          * Since the write to the buffer is still not
1220          * fully lockless, we must be careful with NMIs.
1221          * The locks in the writers are taken when a write
1222          * crosses to a new page. The locks protect against
1223          * races with the readers (this will soon be fixed
1224          * with a lockless solution).
1225          *
1226          * Because we can not protect against NMIs, and we
1227          * want to keep traces reentrant, we need to manage
1228          * what happens when we are in an NMI.
1229          *
1230          * NMIs can happen after we take the lock.
1231          * If we are in an NMI, only take the lock
1232          * if it is not already taken. Otherwise
1233          * simply fail.
1234          */
1235         if (unlikely(in_nmi())) {
1236                 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1237                         cpu_buffer->nmi_dropped++;
1238                         goto out_reset;
1239                 }
1240         } else
1241                 __raw_spin_lock(&cpu_buffer->lock);
1242
1243         lock_taken = true;
1244
1245         rb_inc_page(cpu_buffer, &next_page);
1246
1247         head_page = cpu_buffer->head_page;
1248         reader_page = cpu_buffer->reader_page;
1249
1250         /* we grabbed the lock before incrementing */
1251         if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1252                 goto out_reset;
1253
1254         /*
1255          * If for some reason, we had an interrupt storm that made
1256          * it all the way around the buffer, bail, and warn
1257          * about it.
1258          */
1259         if (unlikely(next_page == commit_page)) {
1260                 cpu_buffer->commit_overrun++;
1261                 goto out_reset;
1262         }
1263
1264         if (next_page == head_page) {
1265                 if (!(buffer->flags & RB_FL_OVERWRITE))
1266                         goto out_reset;
1267
1268                 /* tail_page has not moved yet? */
1269                 if (tail_page == cpu_buffer->tail_page) {
1270                         /* count overflows */
1271                         cpu_buffer->overrun +=
1272                                 local_read(&head_page->entries);
1273
1274                         rb_inc_page(cpu_buffer, &head_page);
1275                         cpu_buffer->head_page = head_page;
1276                         cpu_buffer->head_page->read = 0;
1277                 }
1278         }
1279
1280         /*
1281          * If the tail page is still the same as what we think
1282          * it is, then it is up to us to update the tail
1283          * pointer.
1284          */
1285         if (tail_page == cpu_buffer->tail_page) {
1286                 local_set(&next_page->write, 0);
1287                 local_set(&next_page->entries, 0);
1288                 local_set(&next_page->page->commit, 0);
1289                 cpu_buffer->tail_page = next_page;
1290
1291                 /* reread the time stamp */
1292                 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1293                 cpu_buffer->tail_page->page->time_stamp = *ts;
1294         }
1295
1296         rb_reset_tail(cpu_buffer, tail_page, tail, length);
1297
1298         __raw_spin_unlock(&cpu_buffer->lock);
1299         local_irq_restore(flags);
1300
1301         /* fail and let the caller try again */
1302         return ERR_PTR(-EAGAIN);
1303
1304  out_reset:
1305         /* reset write */
1306         rb_reset_tail(cpu_buffer, tail_page, tail, length);
1307
1308         if (likely(lock_taken))
1309                 __raw_spin_unlock(&cpu_buffer->lock);
1310         local_irq_restore(flags);
1311         return NULL;
1312 }
1313
1314 static struct ring_buffer_event *
1315 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1316                   unsigned type, unsigned long length, u64 *ts)
1317 {
1318         struct buffer_page *tail_page, *commit_page;
1319         struct ring_buffer_event *event;
1320         unsigned long tail, write;
1321
1322         commit_page = cpu_buffer->commit_page;
1323         /* we just need to protect against interrupts */
1324         barrier();
1325         tail_page = cpu_buffer->tail_page;
1326         write = local_add_return(length, &tail_page->write);
1327         tail = write - length;
1328
1329         /* See if we shot past the end of this buffer page */
1330         if (write > BUF_PAGE_SIZE)
1331                 return rb_move_tail(cpu_buffer, length, tail,
1332                                     commit_page, tail_page, ts);
1333
1334         /* We reserved something on the buffer */
1335
1336         event = __rb_page_index(tail_page, tail);
1337         kmemcheck_annotate_bitfield(event, bitfield);
1338         rb_update_event(event, type, length);
1339
1340         /* The passed in type is zero for DATA */
1341         if (likely(!type))
1342                 local_inc(&tail_page->entries);
1343
1344         /*
1345          * If this is the first commit on the page, then update
1346          * its timestamp.
1347          */
1348         if (!tail)
1349                 tail_page->page->time_stamp = *ts;
1350
1351         return event;
1352 }
1353
1354 static inline int
1355 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1356                   struct ring_buffer_event *event)
1357 {
1358         unsigned long new_index, old_index;
1359         struct buffer_page *bpage;
1360         unsigned long index;
1361         unsigned long addr;
1362
1363         new_index = rb_event_index(event);
1364         old_index = new_index + rb_event_length(event);
1365         addr = (unsigned long)event;
1366         addr &= PAGE_MASK;
1367
1368         bpage = cpu_buffer->tail_page;
1369
1370         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1371                 /*
1372                  * This is on the tail page. It is possible that
1373                  * a write could come in and move the tail page
1374                  * and write to the next page. That is fine
1375                  * because we just shorten what is on this page.
1376                  */
1377                 index = local_cmpxchg(&bpage->write, old_index, new_index);
1378                 if (index == old_index)
1379                         return 1;
1380         }
1381
1382         /* could not discard */
1383         return 0;
1384 }
1385
1386 static int
1387 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1388                   u64 *ts, u64 *delta)
1389 {
1390         struct ring_buffer_event *event;
1391         static int once;
1392         int ret;
1393
1394         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1395                 printk(KERN_WARNING "Delta way too big! %llu"
1396                        " ts=%llu write stamp = %llu\n",
1397                        (unsigned long long)*delta,
1398                        (unsigned long long)*ts,
1399                        (unsigned long long)cpu_buffer->write_stamp);
1400                 WARN_ON(1);
1401         }
1402
1403         /*
1404          * The delta is too big, we need to add a
1405          * new timestamp.
1406          */
1407         event = __rb_reserve_next(cpu_buffer,
1408                                   RINGBUF_TYPE_TIME_EXTEND,
1409                                   RB_LEN_TIME_EXTEND,
1410                                   ts);
1411         if (!event)
1412                 return -EBUSY;
1413
1414         if (PTR_ERR(event) == -EAGAIN)
1415                 return -EAGAIN;
1416
1417         /* Only a committed time event can update the write stamp */
1418         if (rb_event_is_commit(cpu_buffer, event)) {
1419                 /*
1421                  * If this is not the first event on the page, store the
1422                  * delta in it. Otherwise the page timestamp already holds
1423                  * it, so try to discard the event; if we can't, zero it.
1423                  */
1424                 if (rb_event_index(event)) {
1425                         event->time_delta = *delta & TS_MASK;
1426                         event->array[0] = *delta >> TS_SHIFT;
1427                 } else {
1428                         /* try to discard, since we do not need this */
1429                         if (!rb_try_to_discard(cpu_buffer, event)) {
1430                                 /* nope, just zero it */
1431                                 event->time_delta = 0;
1432                                 event->array[0] = 0;
1433                         }
1434                 }
1435                 cpu_buffer->write_stamp = *ts;
1436                 /* let the caller know this was the commit */
1437                 ret = 1;
1438         } else {
1439                 /* Try to discard the event */
1440                 if (!rb_try_to_discard(cpu_buffer, event)) {
1441                         /* Darn, this is just wasted space */
1442                         event->time_delta = 0;
1443                         event->array[0] = 0;
1444                 }
1445                 ret = 0;
1446         }
1447
1448         *delta = 0;
1449
1450         return ret;
1451 }
1452
1453 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
1454 {
1455         local_inc(&cpu_buffer->committing);
1456         local_inc(&cpu_buffer->commits);
1457 }
1458
1459 static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
1460 {
1461         unsigned long commits;
1462
1463         if (RB_WARN_ON(cpu_buffer,
1464                        !local_read(&cpu_buffer->committing)))
1465                 return;
1466
1467  again:
1468         commits = local_read(&cpu_buffer->commits);
1469         /* synchronize with interrupts */
1470         barrier();
1471         if (local_read(&cpu_buffer->committing) == 1)
1472                 rb_set_commit_to_write(cpu_buffer);
1473
1474         local_dec(&cpu_buffer->committing);
1475
1476         /* synchronize with interrupts */
1477         barrier();
1478
1479         /*
1480          * Need to account for interrupts coming in between the
1481          * updating of the commit page and the clearing of the
1482          * committing counter.
1483          */
1484         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
1485             !local_read(&cpu_buffer->committing)) {
1486                 local_inc(&cpu_buffer->committing);
1487                 goto again;
1488         }
1489 }
1490
1491 static struct ring_buffer_event *
1492 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1493                       unsigned long length)
1494 {
1495         struct ring_buffer_event *event;
1496         u64 ts, delta = 0;
1497         int commit = 0;
1498         int nr_loops = 0;
1499
1500         rb_start_commit(cpu_buffer);
1501
1502         length = rb_calculate_event_length(length);
1503  again:
1504         /*
1505          * We allow for interrupts to reenter here and do a trace.
1506          * If one does, it will cause this original code to loop
1507          * back here. Even with heavy interrupts happening, this
1508          * should only happen a few times in a row. If this happens
1509          * 1000 times in a row, there must be either an interrupt
1510          * storm or we have something buggy.
1511          * Bail!
1512          */
1513         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1514                 goto out_fail;
1515
1516         ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1517
1518         /*
1519          * Only the first commit can update the timestamp.
1520          * Yes there is a race here. If an interrupt comes in
1521          * just after the conditional and it traces too, then it
1522          * will also check the deltas. More than one timestamp may
1523          * also be written. But only the entry that did the actual
1524          * commit will have a non-zero delta.
1525          */
1526         if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1527                    rb_page_write(cpu_buffer->tail_page) ==
1528                    rb_commit_index(cpu_buffer))) {
1529                 u64 diff;
1530
1531                 diff = ts - cpu_buffer->write_stamp;
1532
1533                 /* make sure this diff is calculated here */
1534                 barrier();
1535
1536                 /* Did the write stamp get updated already? */
1537                 if (unlikely(ts < cpu_buffer->write_stamp))
1538                         goto get_event;
1539
1540                 delta = diff;
1541                 if (unlikely(test_time_stamp(delta))) {
1542
1543                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1544                         if (commit == -EBUSY)
1545                                 goto out_fail;
1546
1547                         if (commit == -EAGAIN)
1548                                 goto again;
1549
1550                         RB_WARN_ON(cpu_buffer, commit < 0);
1551                 }
1552         }
1553
1554  get_event:
1555         event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1556         if (unlikely(PTR_ERR(event) == -EAGAIN))
1557                 goto again;
1558
1559         if (!event)
1560                 goto out_fail;
1561
1562         if (!rb_event_is_commit(cpu_buffer, event))
1563                 delta = 0;
1564
1565         event->time_delta = delta;
1566
1567         return event;
1568
1569  out_fail:
1570         rb_end_commit(cpu_buffer);
1571         return NULL;
1572 }
1573
1574 #ifdef CONFIG_TRACING
1575
1576 #define TRACE_RECURSIVE_DEPTH 16
1577
1578 static int trace_recursive_lock(void)
1579 {
1580         current->trace_recursion++;
1581
1582         if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1583                 return 0;
1584
1585         /* Disable all tracing before we do anything else */
1586         tracing_off_permanent();
1587
1588         printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1589                     "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1590                     current->trace_recursion,
1591                     hardirq_count() >> HARDIRQ_SHIFT,
1592                     softirq_count() >> SOFTIRQ_SHIFT,
1593                     in_nmi());
1594
1595         WARN_ON_ONCE(1);
1596         return -1;
1597 }
1598
1599 static void trace_recursive_unlock(void)
1600 {
1601         WARN_ON_ONCE(!current->trace_recursion);
1602
1603         current->trace_recursion--;
1604 }
1605
1606 #else
1607
1608 #define trace_recursive_lock()          (0)
1609 #define trace_recursive_unlock()        do { } while (0)
1610
1611 #endif
1612
1613 static DEFINE_PER_CPU(int, rb_need_resched);
1614
1615 /**
1616  * ring_buffer_lock_reserve - reserve a part of the buffer
1617  * @buffer: the ring buffer to reserve from
1618  * @length: the length of the data to reserve (excluding event header)
1619  *
1620  * Returns a reserved event on the ring buffer to copy directly to.
1621  * The user of this interface will need to get the body to write into
1622  * and can use the ring_buffer_event_data() interface.
1623  *
1624  * The length is the length of the data needed, not the event length
1625  * which also includes the event header.
1626  *
1627  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1628  * If NULL is returned, then nothing has been allocated or locked.
1629  */
1630 struct ring_buffer_event *
1631 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1632 {
1633         struct ring_buffer_per_cpu *cpu_buffer;
1634         struct ring_buffer_event *event;
1635         int cpu, resched;
1636
1637         if (ring_buffer_flags != RB_BUFFERS_ON)
1638                 return NULL;
1639
1640         if (atomic_read(&buffer->record_disabled))
1641                 return NULL;
1642
1643         /* If we are tracing schedule, we don't want to recurse */
1644         resched = ftrace_preempt_disable();
1645
1646         if (trace_recursive_lock())
1647                 goto out_nocheck;
1648
1649         cpu = raw_smp_processor_id();
1650
1651         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1652                 goto out;
1653
1654         cpu_buffer = buffer->buffers[cpu];
1655
1656         if (atomic_read(&cpu_buffer->record_disabled))
1657                 goto out;
1658
1659         if (length > BUF_MAX_DATA_SIZE)
1660                 goto out;
1661
1662         event = rb_reserve_next_event(cpu_buffer, length);
1663         if (!event)
1664                 goto out;
1665
1666         /*
1667          * Need to store resched state on this cpu.
1668          * Only the first needs to.
1669          */
1670
1671         if (preempt_count() == 1)
1672                 per_cpu(rb_need_resched, cpu) = resched;
1673
1674         return event;
1675
1676  out:
1677         trace_recursive_unlock();
1678
1679  out_nocheck:
1680         ftrace_preempt_enable(resched);
1681         return NULL;
1682 }
1683 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
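
/*
 * Illustrative sketch (hypothetical caller code, not part of this file)
 * of the reserve/commit pairing described above; "my_buffer" and
 * "struct my_entry" are assumed caller-side names:
 *
 *        struct ring_buffer_event *event;
 *        struct my_entry *entry;
 *
 *        event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *        entry->value = 42;
 *        ring_buffer_unlock_commit(my_buffer, event);
 */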
1684
1685 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1686                       struct ring_buffer_event *event)
1687 {
1688         local_inc(&cpu_buffer->entries);
1689
1690         /*
1691          * The first event in the commit queue updates the
1692          * time stamp.
1693          */
1694         if (rb_event_is_commit(cpu_buffer, event))
1695                 cpu_buffer->write_stamp += event->time_delta;
1696
1697         rb_end_commit(cpu_buffer);
1698 }
1699
1700 /**
1701  * ring_buffer_unlock_commit - commit a reserved event
1702  * @buffer: The buffer to commit to
1703  * @event: The event pointer to commit.
1704  *
1705  * This commits the data to the ring buffer, and releases any locks held.
1706  *
1707  * Must be paired with ring_buffer_lock_reserve.
1708  */
1709 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1710                               struct ring_buffer_event *event)
1711 {
1712         struct ring_buffer_per_cpu *cpu_buffer;
1713         int cpu = raw_smp_processor_id();
1714
1715         cpu_buffer = buffer->buffers[cpu];
1716
1717         rb_commit(cpu_buffer, event);
1718
1719         trace_recursive_unlock();
1720
1721         /*
1722          * Only the last preempt count needs to restore preemption.
1723          */
1724         if (preempt_count() == 1)
1725                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1726         else
1727                 preempt_enable_no_resched_notrace();
1728
1729         return 0;
1730 }
1731 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1732
1733 static inline void rb_event_discard(struct ring_buffer_event *event)
1734 {
1735         /* array[0] holds the actual length for the discarded event */
1736         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1737         event->type_len = RINGBUF_TYPE_PADDING;
1738         /* time delta must be non zero */
1739         if (!event->time_delta)
1740                 event->time_delta = 1;
1741 }
1742
1743 /**
1744  * ring_buffer_event_discard - discard any event in the ring buffer
1745  * @event: the event to discard
1746  *
1747  * Sometimes an event that is in the ring buffer needs to be ignored.
1748  * This function lets the user discard an event in the ring buffer
1749  * and then that event will not be read later.
1750  *
1751  * Note, it is up to the user to be careful with this, and protect
1752  * against races. If the user discards an event that has been consumed,
1753  * it is possible that it could corrupt the ring buffer.
1754  */
1755 void ring_buffer_event_discard(struct ring_buffer_event *event)
1756 {
1757         rb_event_discard(event);
1758 }
1759 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1760
1761 /**
1762  * ring_buffer_commit_discard - discard an event that has not been committed
1763  * @buffer: the ring buffer
1764  * @event: non committed event to discard
1765  *
1766  * This is similar to ring_buffer_event_discard but must only be
1767  * performed on an event that has not been committed yet. The difference
1768  * is that this will also try to free the event from the ring buffer
1769  * if another event has not been added behind it.
1770  *
1771  * If another event has been added behind it, it will set the event
1772  * up as discarded, and perform the commit.
1773  *
1774  * If this function is called, do not call ring_buffer_unlock_commit on
1775  * the event.
1776  */
1777 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1778                                 struct ring_buffer_event *event)
1779 {
1780         struct ring_buffer_per_cpu *cpu_buffer;
1781         int cpu;
1782
1783         /* The event is discarded regardless */
1784         rb_event_discard(event);
1785
1786         cpu = smp_processor_id();
1787         cpu_buffer = buffer->buffers[cpu];
1788
1789         /*
1790          * This must only be called if the event has not been
1791          * committed yet. Thus we can assume that preemption
1792          * is still disabled.
1793          */
1794         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
1795
1796         if (!rb_try_to_discard(cpu_buffer, event))
1797                 goto out;
1798
1799         /*
1800          * The commit is still visible by the reader, so we
1801          * must increment entries.
1802          */
1803         local_inc(&cpu_buffer->entries);
1804  out:
1805         rb_end_commit(cpu_buffer);
1806
1807         trace_recursive_unlock();
1808
1809         /*
1810          * Only the last preempt count needs to restore preemption.
1811          */
1812         if (preempt_count() == 1)
1813                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1814         else
1815                 preempt_enable_no_resched_notrace();
1816
1817 }
1818 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
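
/*
 * Illustrative sketch (hypothetical caller code) of discarding a
 * reserved event instead of committing it, e.g. when a filter decides
 * after filling the entry that it should be dropped; "fill_entry" and
 * "keep_entry" are assumed caller-side helpers:
 *
 *        event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *        fill_entry(entry);
 *        if (keep_entry(entry))
 *                ring_buffer_unlock_commit(my_buffer, event);
 *        else
 *                ring_buffer_discard_commit(my_buffer, event);
 */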
1819
1820 /**
1821  * ring_buffer_write - write data to the buffer without reserving
1822  * @buffer: The ring buffer to write to.
1823  * @length: The length of the data being written (excluding the event header)
1824  * @data: The data to write to the buffer.
1825  *
1826  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1827  * one function. If you already have the data to write to the buffer, it
1828  * may be easier to simply call this function.
1829  *
1830  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1831  * and not the length of the event which would hold the header.
1832  */
1833 int ring_buffer_write(struct ring_buffer *buffer,
1834                         unsigned long length,
1835                         void *data)
1836 {
1837         struct ring_buffer_per_cpu *cpu_buffer;
1838         struct ring_buffer_event *event;
1839         void *body;
1840         int ret = -EBUSY;
1841         int cpu, resched;
1842
1843         if (ring_buffer_flags != RB_BUFFERS_ON)
1844                 return -EBUSY;
1845
1846         if (atomic_read(&buffer->record_disabled))
1847                 return -EBUSY;
1848
1849         resched = ftrace_preempt_disable();
1850
1851         cpu = raw_smp_processor_id();
1852
1853         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1854                 goto out;
1855
1856         cpu_buffer = buffer->buffers[cpu];
1857
1858         if (atomic_read(&cpu_buffer->record_disabled))
1859                 goto out;
1860
1861         if (length > BUF_MAX_DATA_SIZE)
1862                 goto out;
1863
1864         event = rb_reserve_next_event(cpu_buffer, length);
1865         if (!event)
1866                 goto out;
1867
1868         body = rb_event_data(event);
1869
1870         memcpy(body, data, length);
1871
1872         rb_commit(cpu_buffer, event);
1873
1874         ret = 0;
1875  out:
1876         ftrace_preempt_enable(resched);
1877
1878         return ret;
1879 }
1880 EXPORT_SYMBOL_GPL(ring_buffer_write);
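
/*
 * Illustrative sketch of the one-shot interface above; "my_buffer" and
 * "struct my_sample" are assumed caller-side names:
 *
 *        struct my_sample sample = { .value = 42 };
 *
 *        if (ring_buffer_write(my_buffer, sizeof(sample), &sample))
 *                return -EBUSY;
 */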
1881
1882 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1883 {
1884         struct buffer_page *reader = cpu_buffer->reader_page;
1885         struct buffer_page *head = cpu_buffer->head_page;
1886         struct buffer_page *commit = cpu_buffer->commit_page;
1887
1888         return reader->read == rb_page_commit(reader) &&
1889                 (commit == reader ||
1890                  (commit == head &&
1891                   head->read == rb_page_commit(commit)));
1892 }
1893
1894 /**
1895  * ring_buffer_record_disable - stop all writes into the buffer
1896  * @buffer: The ring buffer to stop writes to.
1897  *
1898  * This prevents all writes to the buffer. Any attempt to write
1899  * to the buffer after this will fail and return NULL.
1900  *
1901  * The caller should call synchronize_sched() after this.
1902  */
1903 void ring_buffer_record_disable(struct ring_buffer *buffer)
1904 {
1905         atomic_inc(&buffer->record_disabled);
1906 }
1907 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1908
1909 /**
1910  * ring_buffer_record_enable - enable writes to the buffer
1911  * @buffer: The ring buffer to enable writes
1912  *
1913  * Note, multiple disables will need the same number of enables
1914  * to truly enable the writing (much like preempt_disable).
1915  */
1916 void ring_buffer_record_enable(struct ring_buffer *buffer)
1917 {
1918         atomic_dec(&buffer->record_disabled);
1919 }
1920 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
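
/*
 * Illustrative sketch (hypothetical caller code) of the quiescing
 * pattern described above: stop all writers, wait for writers already
 * inside the buffer to finish, do the work, then re-enable recording:
 *
 *        ring_buffer_record_disable(my_buffer);
 *        synchronize_sched();
 *        ... read or reset the buffer safely here ...
 *        ring_buffer_record_enable(my_buffer);
 */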
1921
1922 /**
1923  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1924  * @buffer: The ring buffer to stop writes to.
1925  * @cpu: The CPU buffer to stop
1926  *
1927  * This prevents all writes to the buffer. Any attempt to write
1928  * to the buffer after this will fail and return NULL.
1929  *
1930  * The caller should call synchronize_sched() after this.
1931  */
1932 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1933 {
1934         struct ring_buffer_per_cpu *cpu_buffer;
1935
1936         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1937                 return;
1938
1939         cpu_buffer = buffer->buffers[cpu];
1940         atomic_inc(&cpu_buffer->record_disabled);
1941 }
1942 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1943
1944 /**
1945  * ring_buffer_record_enable_cpu - enable writes to the buffer
1946  * @buffer: The ring buffer to enable writes
1947  * @cpu: The CPU to enable.
1948  *
1949  * Note, multiple disables will need the same number of enables
1950  * to truly enable the writing (much like preempt_disable).
1951  */
1952 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1953 {
1954         struct ring_buffer_per_cpu *cpu_buffer;
1955
1956         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1957                 return;
1958
1959         cpu_buffer = buffer->buffers[cpu];
1960         atomic_dec(&cpu_buffer->record_disabled);
1961 }
1962 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1963
1964 /**
1965  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1966  * @buffer: The ring buffer
1967  * @cpu: The per CPU buffer to get the entries from.
1968  */
1969 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1970 {
1971         struct ring_buffer_per_cpu *cpu_buffer;
1972         unsigned long ret;
1973
1974         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1975                 return 0;
1976
1977         cpu_buffer = buffer->buffers[cpu];
1978         ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1979                 - cpu_buffer->read;
1980
1981         return ret;
1982 }
1983 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1984
1985 /**
1986  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1987  * @buffer: The ring buffer
1988  * @cpu: The per CPU buffer to get the number of overruns from
1989  */
1990 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1991 {
1992         struct ring_buffer_per_cpu *cpu_buffer;
1993         unsigned long ret;
1994
1995         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1996                 return 0;
1997
1998         cpu_buffer = buffer->buffers[cpu];
1999         ret = cpu_buffer->overrun;
2000
2001         return ret;
2002 }
2003 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2004
2005 /**
2006  * ring_buffer_nmi_dropped_cpu - get the number of NMIs that were dropped
2007  * @buffer: The ring buffer
2008  * @cpu: The per CPU buffer to get the number of dropped NMIs from
2009  */
2010 unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
2011 {
2012         struct ring_buffer_per_cpu *cpu_buffer;
2013         unsigned long ret;
2014
2015         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2016                 return 0;
2017
2018         cpu_buffer = buffer->buffers[cpu];
2019         ret = cpu_buffer->nmi_dropped;
2020
2021         return ret;
2022 }
2023 EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2024
2025 /**
2026  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2027  * @buffer: The ring buffer
2028  * @cpu: The per CPU buffer to get the number of commit overruns from
2029  */
2030 unsigned long
2031 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2032 {
2033         struct ring_buffer_per_cpu *cpu_buffer;
2034         unsigned long ret;
2035
2036         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2037                 return 0;
2038
2039         cpu_buffer = buffer->buffers[cpu];
2040         ret = cpu_buffer->commit_overrun;
2041
2042         return ret;
2043 }
2044 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2045
2046 /**
2047  * ring_buffer_entries - get the number of entries in a buffer
2048  * @buffer: The ring buffer
2049  *
2050  * Returns the total number of entries in the ring buffer
2051  * (all CPU entries)
2052  */
2053 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2054 {
2055         struct ring_buffer_per_cpu *cpu_buffer;
2056         unsigned long entries = 0;
2057         int cpu;
2058
2059         /* if you care about this being correct, lock the buffer */
2060         for_each_buffer_cpu(buffer, cpu) {
2061                 cpu_buffer = buffer->buffers[cpu];
2062                 entries += (local_read(&cpu_buffer->entries) -
2063                             cpu_buffer->overrun) - cpu_buffer->read;
2064         }
2065
2066         return entries;
2067 }
2068 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2069
2070 /**
2071  * ring_buffer_overruns - get the number of overruns in the buffer
2072  * @buffer: The ring buffer
2073  *
2074  * Returns the total number of overruns in the ring buffer
2075  * (all CPU entries)
2076  */
2077 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2078 {
2079         struct ring_buffer_per_cpu *cpu_buffer;
2080         unsigned long overruns = 0;
2081         int cpu;
2082
2083         /* if you care about this being correct, lock the buffer */
2084         for_each_buffer_cpu(buffer, cpu) {
2085                 cpu_buffer = buffer->buffers[cpu];
2086                 overruns += cpu_buffer->overrun;
2087         }
2088
2089         return overruns;
2090 }
2091 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2092
2093 static void rb_iter_reset(struct ring_buffer_iter *iter)
2094 {
2095         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2096
2097         /* Iterator usage is expected to have record disabled */
2098         if (list_empty(&cpu_buffer->reader_page->list)) {
2099                 iter->head_page = cpu_buffer->head_page;
2100                 iter->head = cpu_buffer->head_page->read;
2101         } else {
2102                 iter->head_page = cpu_buffer->reader_page;
2103                 iter->head = cpu_buffer->reader_page->read;
2104         }
2105         if (iter->head)
2106                 iter->read_stamp = cpu_buffer->read_stamp;
2107         else
2108                 iter->read_stamp = iter->head_page->page->time_stamp;
2109 }
2110
2111 /**
2112  * ring_buffer_iter_reset - reset an iterator
2113  * @iter: The iterator to reset
2114  *
2115  * Resets the iterator, so that it will start from the beginning
2116  * again.
2117  */
2118 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2119 {
2120         struct ring_buffer_per_cpu *cpu_buffer;
2121         unsigned long flags;
2122
2123         if (!iter)
2124                 return;
2125
2126         cpu_buffer = iter->cpu_buffer;
2127
2128         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2129         rb_iter_reset(iter);
2130         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2131 }
2132 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2133
2134 /**
2135  * ring_buffer_iter_empty - check if an iterator has no more to read
2136  * @iter: The iterator to check
2137  */
2138 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2139 {
2140         struct ring_buffer_per_cpu *cpu_buffer;
2141
2142         cpu_buffer = iter->cpu_buffer;
2143
2144         return iter->head_page == cpu_buffer->commit_page &&
2145                 iter->head == rb_commit_index(cpu_buffer);
2146 }
2147 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2148
2149 static void
2150 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2151                      struct ring_buffer_event *event)
2152 {
2153         u64 delta;
2154
2155         switch (event->type_len) {
2156         case RINGBUF_TYPE_PADDING:
2157                 return;
2158
2159         case RINGBUF_TYPE_TIME_EXTEND:
2160                 delta = event->array[0];
2161                 delta <<= TS_SHIFT;
2162                 delta += event->time_delta;
2163                 cpu_buffer->read_stamp += delta;
2164                 return;
2165
2166         case RINGBUF_TYPE_TIME_STAMP:
2167                 /* FIXME: not implemented */
2168                 return;
2169
2170         case RINGBUF_TYPE_DATA:
2171                 cpu_buffer->read_stamp += event->time_delta;
2172                 return;
2173
2174         default:
2175                 BUG();
2176         }
2177         return;
2178 }
2179
2180 static void
2181 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2182                           struct ring_buffer_event *event)
2183 {
2184         u64 delta;
2185
2186         switch (event->type_len) {
2187         case RINGBUF_TYPE_PADDING:
2188                 return;
2189
2190         case RINGBUF_TYPE_TIME_EXTEND:
2191                 delta = event->array[0];
2192                 delta <<= TS_SHIFT;
2193                 delta += event->time_delta;
2194                 iter->read_stamp += delta;
2195                 return;
2196
2197         case RINGBUF_TYPE_TIME_STAMP:
2198                 /* FIXME: not implemented */
2199                 return;
2200
2201         case RINGBUF_TYPE_DATA:
2202                 iter->read_stamp += event->time_delta;
2203                 return;
2204
2205         default:
2206                 BUG();
2207         }
2208         return;
2209 }
2210
2211 static struct buffer_page *
2212 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2213 {
2214         struct buffer_page *reader = NULL;
2215         unsigned long flags;
2216         int nr_loops = 0;
2217
2218         local_irq_save(flags);
2219         __raw_spin_lock(&cpu_buffer->lock);
2220
2221  again:
2222         /*
2223          * This should normally only loop twice. But because the
2224          * start of the reader inserts an empty page, it causes
2225          * a case where we will loop three times. There should be no
2226          * reason to loop four times (that I know of).
2227          */
2228         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2229                 reader = NULL;
2230                 goto out;
2231         }
2232
2233         reader = cpu_buffer->reader_page;
2234
2235         /* If there's more to read, return this page */
2236         if (cpu_buffer->reader_page->read < rb_page_size(reader))
2237                 goto out;
2238
2239         /* Never should we have an index greater than the size */
2240         if (RB_WARN_ON(cpu_buffer,
2241                        cpu_buffer->reader_page->read > rb_page_size(reader)))
2242                 goto out;
2243
2244         /* check if we caught up to the tail */
2245         reader = NULL;
2246         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2247                 goto out;
2248
2249         /*
2250          * Splice the empty reader page into the list around the head.
2251          * Reset the reader page to size zero.
2252          */
2253
2254         reader = cpu_buffer->head_page;
2255         cpu_buffer->reader_page->list.next = reader->list.next;
2256         cpu_buffer->reader_page->list.prev = reader->list.prev;
2257
2258         /*
2259          * cpu_buffer->pages just needs to point to the buffer, it
2260          *  has no specific buffer page to point to. Let's move it out
2261          *  of our way so we don't accidentally swap it.
2262          */
2263         cpu_buffer->pages = reader->list.prev;
2264
2265         local_set(&cpu_buffer->reader_page->write, 0);
2266         local_set(&cpu_buffer->reader_page->entries, 0);
2267         local_set(&cpu_buffer->reader_page->page->commit, 0);
2268
2269         /* Make the reader page now replace the head */
2270         reader->list.prev->next = &cpu_buffer->reader_page->list;
2271         reader->list.next->prev = &cpu_buffer->reader_page->list;
2272
2273         /*
2274          * If the tail is on the reader, then we must set the head
2275          * to the inserted page, otherwise we set it one before.
2276          */
2277         cpu_buffer->head_page = cpu_buffer->reader_page;
2278
2279         if (cpu_buffer->commit_page != reader)
2280                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2281
2282         /* Finally update the reader page to the new head */
2283         cpu_buffer->reader_page = reader;
2284         rb_reset_reader_page(cpu_buffer);
2285
2286         goto again;
2287
2288  out:
2289         __raw_spin_unlock(&cpu_buffer->lock);
2290         local_irq_restore(flags);
2291
2292         return reader;
2293 }
2294
2295 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2296 {
2297         struct ring_buffer_event *event;
2298         struct buffer_page *reader;
2299         unsigned length;
2300
2301         reader = rb_get_reader_page(cpu_buffer);
2302
2303         /* This function should not be called when buffer is empty */
2304         if (RB_WARN_ON(cpu_buffer, !reader))
2305                 return;
2306
2307         event = rb_reader_event(cpu_buffer);
2308
2309         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2310                         || rb_discarded_event(event))
2311                 cpu_buffer->read++;
2312
2313         rb_update_read_stamp(cpu_buffer, event);
2314
2315         length = rb_event_length(event);
2316         cpu_buffer->reader_page->read += length;
2317 }
2318
2319 static void rb_advance_iter(struct ring_buffer_iter *iter)
2320 {
2321         struct ring_buffer *buffer;
2322         struct ring_buffer_per_cpu *cpu_buffer;
2323         struct ring_buffer_event *event;
2324         unsigned length;
2325
2326         cpu_buffer = iter->cpu_buffer;
2327         buffer = cpu_buffer->buffer;
2328
2329         /*
2330          * Check if we are at the end of the buffer.
2331          */
2332         if (iter->head >= rb_page_size(iter->head_page)) {
2333                 /* discarded commits can make the page empty */
2334                 if (iter->head_page == cpu_buffer->commit_page)
2335                         return;
2336                 rb_inc_iter(iter);
2337                 return;
2338         }
2339
2340         event = rb_iter_head_event(iter);
2341
2342         length = rb_event_length(event);
2343
2344         /*
2345          * This should not be called to advance the iterator if we are
2346          * at the tail of the buffer.
2347          */
2348         if (RB_WARN_ON(cpu_buffer,
2349                        (iter->head_page == cpu_buffer->commit_page) &&
2350                        (iter->head + length > rb_commit_index(cpu_buffer))))
2351                 return;
2352
2353         rb_update_iter_read_stamp(iter, event);
2354
2355         iter->head += length;
2356
2357         /* check for end of page padding */
2358         if ((iter->head >= rb_page_size(iter->head_page)) &&
2359             (iter->head_page != cpu_buffer->commit_page))
2360                 rb_advance_iter(iter);
2361 }
2362
2363 static struct ring_buffer_event *
2364 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2365 {
2366         struct ring_buffer_per_cpu *cpu_buffer;
2367         struct ring_buffer_event *event;
2368         struct buffer_page *reader;
2369         int nr_loops = 0;
2370
2371         cpu_buffer = buffer->buffers[cpu];
2372
2373  again:
2374         /*
2375          * We repeat when a timestamp is encountered. It is possible
2376          * to get multiple timestamps from an interrupt entering just
2377          * as one timestamp is about to be written, or from discarded
2378          * commits. The most that we can have is the number on a single page.
2379          */
2380         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2381                 return NULL;
2382
2383         reader = rb_get_reader_page(cpu_buffer);
2384         if (!reader)
2385                 return NULL;
2386
2387         event = rb_reader_event(cpu_buffer);
2388
2389         switch (event->type_len) {
2390         case RINGBUF_TYPE_PADDING:
2391                 if (rb_null_event(event))
2392                         RB_WARN_ON(cpu_buffer, 1);
2393                 /*
2394                  * Because the writer could be discarding every
2395                  * event it creates (which would probably be bad)
2396                  * if we were to go back to "again" then we may never
2397                  * catch up, and will trigger the warn on, or lock
2398                  * the box. Return the padding, and we will release
2399                  * the current locks, and try again.
2400                  */
2401                 rb_advance_reader(cpu_buffer);
2402                 return event;
2403
2404         case RINGBUF_TYPE_TIME_EXTEND:
2405                 /* Internal data, OK to advance */
2406                 rb_advance_reader(cpu_buffer);
2407                 goto again;
2408
2409         case RINGBUF_TYPE_TIME_STAMP:
2410                 /* FIXME: not implemented */
2411                 rb_advance_reader(cpu_buffer);
2412                 goto again;
2413
2414         case RINGBUF_TYPE_DATA:
2415                 if (ts) {
2416                         *ts = cpu_buffer->read_stamp + event->time_delta;
2417                         ring_buffer_normalize_time_stamp(buffer,
2418                                                          cpu_buffer->cpu, ts);
2419                 }
2420                 return event;
2421
2422         default:
2423                 BUG();
2424         }
2425
2426         return NULL;
2427 }
2428 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2429
2430 static struct ring_buffer_event *
2431 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2432 {
2433         struct ring_buffer *buffer;
2434         struct ring_buffer_per_cpu *cpu_buffer;
2435         struct ring_buffer_event *event;
2436         int nr_loops = 0;
2437
2438         if (ring_buffer_iter_empty(iter))
2439                 return NULL;
2440
2441         cpu_buffer = iter->cpu_buffer;
2442         buffer = cpu_buffer->buffer;
2443
2444  again:
2445         /*
2446          * We repeat when a timestamp is encountered.
2447          * We can get multiple timestamps by nested interrupts or also
2448          * if filtering is on (discarding commits). Since discarding
2449          * commits can be frequent we can get a lot of timestamps.
2450          * But we limit them by not adding timestamps if they begin
2451          * at the start of a page.
2452          */
2453         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2454                 return NULL;
2455
2456         if (rb_per_cpu_empty(cpu_buffer))
2457                 return NULL;
2458
2459         event = rb_iter_head_event(iter);
2460
2461         switch (event->type_len) {
2462         case RINGBUF_TYPE_PADDING:
2463                 if (rb_null_event(event)) {
2464                         rb_inc_iter(iter);
2465                         goto again;
2466                 }
2467                 rb_advance_iter(iter);
2468                 return event;
2469
2470         case RINGBUF_TYPE_TIME_EXTEND:
2471                 /* Internal data, OK to advance */
2472                 rb_advance_iter(iter);
2473                 goto again;
2474
2475         case RINGBUF_TYPE_TIME_STAMP:
2476                 /* FIXME: not implemented */
2477                 rb_advance_iter(iter);
2478                 goto again;
2479
2480         case RINGBUF_TYPE_DATA:
2481                 if (ts) {
2482                         *ts = iter->read_stamp + event->time_delta;
2483                         ring_buffer_normalize_time_stamp(buffer,
2484                                                          cpu_buffer->cpu, ts);
2485                 }
2486                 return event;
2487
2488         default:
2489                 BUG();
2490         }
2491
2492         return NULL;
2493 }
2494 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2495
2496 static inline int rb_ok_to_lock(void)
2497 {
2498         /*
2499          * If an NMI die dumps out the content of the ring buffer
2500          * do not grab locks. We also permanently disable the ring
2501          * buffer too. A one time deal is all you get from reading
2502          * the ring buffer from an NMI.
2503          */
2504         if (likely(!in_nmi() && !oops_in_progress))
2505                 return 1;
2506
2507         tracing_off_permanent();
2508         return 0;
2509 }
2510
2511 /**
2512  * ring_buffer_peek - peek at the next event to be read
2513  * @buffer: The ring buffer to read
2514  * @cpu: The cpu to peek at
2515  * @ts: The timestamp counter of this event.
2516  *
2517  * This will return the event that will be read next, but does
2518  * not consume the data.
2519  */
2520 struct ring_buffer_event *
2521 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2522 {
2523         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2524         struct ring_buffer_event *event;
2525         unsigned long flags;
2526         int dolock;
2527
2528         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2529                 return NULL;
2530
2531         dolock = rb_ok_to_lock();
2532  again:
2533         local_irq_save(flags);
2534         if (dolock)
2535                 spin_lock(&cpu_buffer->reader_lock);
2536         event = rb_buffer_peek(buffer, cpu, ts);
2537         if (dolock)
2538                 spin_unlock(&cpu_buffer->reader_lock);
2539         local_irq_restore(flags);
2540
2541         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2542                 cpu_relax();
2543                 goto again;
2544         }
2545
2546         return event;
2547 }
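
/*
 * Illustrative sketch (hypothetical caller code): peeking shows the
 * next event without consuming it; "my_buffer" is an assumed name:
 *
 *        u64 ts;
 *        struct ring_buffer_event *event;
 *
 *        event = ring_buffer_peek(my_buffer, cpu, &ts);
 *        if (event)
 *                pr_debug("next event: ts=%llu len=%u\n",
 *                         (unsigned long long)ts,
 *                         ring_buffer_event_length(event));
 */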
2548
2549 /**
2550  * ring_buffer_iter_peek - peek at the next event to be read
2551  * @iter: The ring buffer iterator
2552  * @ts: The timestamp counter of this event.
2553  *
2554  * This will return the event that will be read next, but does
2555  * not increment the iterator.
2556  */
2557 struct ring_buffer_event *
2558 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2559 {
2560         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2561         struct ring_buffer_event *event;
2562         unsigned long flags;
2563
2564  again:
2565         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2566         event = rb_iter_peek(iter, ts);
2567         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2568
2569         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2570                 cpu_relax();
2571                 goto again;
2572         }
2573
2574         return event;
2575 }
2576
2577 /**
2578  * ring_buffer_consume - return an event and consume it
2579  * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp
2580  *
2581  * Returns the next event in the ring buffer, and that event is consumed.
2582  * Meaning that sequential reads will keep returning a different event,
2583  * and eventually empty the ring buffer if the producer is slower.
2584  */
2585 struct ring_buffer_event *
2586 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2587 {
2588         struct ring_buffer_per_cpu *cpu_buffer;
2589         struct ring_buffer_event *event = NULL;
2590         unsigned long flags;
2591         int dolock;
2592
2593         dolock = rb_ok_to_lock();
2594
2595  again:
2596         /* might be called in atomic */
2597         preempt_disable();
2598
2599         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2600                 goto out;
2601
2602         cpu_buffer = buffer->buffers[cpu];
2603         local_irq_save(flags);
2604         if (dolock)
2605                 spin_lock(&cpu_buffer->reader_lock);
2606
2607         event = rb_buffer_peek(buffer, cpu, ts);
2608         if (!event)
2609                 goto out_unlock;
2610
2611         rb_advance_reader(cpu_buffer);
2612
2613  out_unlock:
2614         if (dolock)
2615                 spin_unlock(&cpu_buffer->reader_lock);
2616         local_irq_restore(flags);
2617
2618  out:
2619         preempt_enable();
2620
2621         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2622                 cpu_relax();
2623                 goto again;
2624         }
2625
2626         return event;
2627 }
2628 EXPORT_SYMBOL_GPL(ring_buffer_consume);
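
/*
 * Illustrative sketch of a consuming read loop over one CPU buffer;
 * "my_buffer" and "process_event" are assumed caller-side names:
 *
 *        u64 ts;
 *        struct ring_buffer_event *event;
 *
 *        while ((event = ring_buffer_consume(my_buffer, cpu, &ts)))
 *                process_event(ring_buffer_event_data(event), ts);
 */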
2629
2630 /**
2631  * ring_buffer_read_start - start a non consuming read of the buffer
2632  * @buffer: The ring buffer to read from
2633  * @cpu: The cpu buffer to iterate over
2634  *
2635  * This starts up an iteration through the buffer. It also disables
2636  * the recording to the buffer until the reading is finished.
2637  * This prevents the reading from being corrupted. This is not
2638  * a consuming read, so a producer is not expected.
2639  *
2640  * Must be paired with ring_buffer_read_finish.
2641  */
2642 struct ring_buffer_iter *
2643 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2644 {
2645         struct ring_buffer_per_cpu *cpu_buffer;
2646         struct ring_buffer_iter *iter;
2647         unsigned long flags;
2648
2649         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2650                 return NULL;
2651
2652         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2653         if (!iter)
2654                 return NULL;
2655
2656         cpu_buffer = buffer->buffers[cpu];
2657
2658         iter->cpu_buffer = cpu_buffer;
2659
2660         atomic_inc(&cpu_buffer->record_disabled);
2661         synchronize_sched();
2662
2663         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2664         __raw_spin_lock(&cpu_buffer->lock);
2665         rb_iter_reset(iter);
2666         __raw_spin_unlock(&cpu_buffer->lock);
2667         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2668
2669         return iter;
2670 }
2671 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2672
2673 /**
2674  * ring_buffer_finish - finish reading the iterator of the buffer
2675  * @iter: The iterator retrieved by ring_buffer_read_start
2676  *
2677  * This re-enables the recording to the buffer, and frees the
2678  * iterator.
2679  */
2680 void
2681 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2682 {
2683         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2684
2685         atomic_dec(&cpu_buffer->record_disabled);
2686         kfree(iter);
2687 }
2688 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2689
2690 /**
2691  * ring_buffer_read - read the next item in the ring buffer by the iterator
2692  * @iter: The ring buffer iterator
2693  * @ts: The time stamp of the event read.
2694  *
2695  * This reads the next event in the ring buffer and increments the iterator.
2696  */
2697 struct ring_buffer_event *
2698 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2699 {
2700         struct ring_buffer_event *event;
2701         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2702         unsigned long flags;
2703
2704  again:
2705         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2706         event = rb_iter_peek(iter, ts);
2707         if (!event)
2708                 goto out;
2709
2710         rb_advance_iter(iter);
2711  out:
2712         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2713
2714         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2715                 cpu_relax();
2716                 goto again;
2717         }
2718
2719         return event;
2720 }
2721 EXPORT_SYMBOL_GPL(ring_buffer_read);
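
/*
 * Illustrative sketch (hypothetical caller code) of a non consuming
 * read with the iterator API above; recording on the CPU buffer stays
 * disabled until ring_buffer_read_finish() is called:
 *
 *        u64 ts;
 *        struct ring_buffer_iter *iter;
 *        struct ring_buffer_event *event;
 *
 *        iter = ring_buffer_read_start(my_buffer, cpu);
 *        if (!iter)
 *                return;
 *        while ((event = ring_buffer_read(iter, &ts)))
 *                process_event(ring_buffer_event_data(event), ts);
 *        ring_buffer_read_finish(iter);
 */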
2722
2723 /**
2724  * ring_buffer_size - return the size of the ring buffer (in bytes)
2725  * @buffer: The ring buffer.
2726  */
2727 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2728 {
2729         return BUF_PAGE_SIZE * buffer->pages;
2730 }
2731 EXPORT_SYMBOL_GPL(ring_buffer_size);
2732
2733 static void
2734 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2735 {
2736         cpu_buffer->head_page
2737                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
2738         local_set(&cpu_buffer->head_page->write, 0);
2739         local_set(&cpu_buffer->head_page->entries, 0);
2740         local_set(&cpu_buffer->head_page->page->commit, 0);
2741
2742         cpu_buffer->head_page->read = 0;
2743
2744         cpu_buffer->tail_page = cpu_buffer->head_page;
2745         cpu_buffer->commit_page = cpu_buffer->head_page;
2746
2747         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2748         local_set(&cpu_buffer->reader_page->write, 0);
2749         local_set(&cpu_buffer->reader_page->entries, 0);
2750         local_set(&cpu_buffer->reader_page->page->commit, 0);
2751         cpu_buffer->reader_page->read = 0;
2752
2753         cpu_buffer->nmi_dropped = 0;
2754         cpu_buffer->commit_overrun = 0;
2755         cpu_buffer->overrun = 0;
2756         cpu_buffer->read = 0;
2757         local_set(&cpu_buffer->entries, 0);
2758         local_set(&cpu_buffer->committing, 0);
2759         local_set(&cpu_buffer->commits, 0);
2760
2761         cpu_buffer->write_stamp = 0;
2762         cpu_buffer->read_stamp = 0;
2763 }
2764
2765 /**
2766  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2767  * @buffer: The ring buffer to reset a per cpu buffer of
2768  * @cpu: The CPU buffer to be reset
2769  */
2770 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2771 {
2772         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2773         unsigned long flags;
2774
2775         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2776                 return;
2777
2778         atomic_inc(&cpu_buffer->record_disabled);
2779
2780         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2781
2782         __raw_spin_lock(&cpu_buffer->lock);
2783
2784         rb_reset_cpu(cpu_buffer);
2785
2786         __raw_spin_unlock(&cpu_buffer->lock);
2787
2788         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2789
2790         atomic_dec(&cpu_buffer->record_disabled);
2791 }
2792 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2793
2794 /**
2795  * ring_buffer_reset - reset a ring buffer
2796  * @buffer: The ring buffer to reset all cpu buffers
2797  */
2798 void ring_buffer_reset(struct ring_buffer *buffer)
2799 {
2800         int cpu;
2801
2802         for_each_buffer_cpu(buffer, cpu)
2803                 ring_buffer_reset_cpu(buffer, cpu);
2804 }
2805 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2806
2807 /**
2808  * ring_buffer_empty - is the ring buffer empty?
2809  * @buffer: The ring buffer to test
2810  */
2811 int ring_buffer_empty(struct ring_buffer *buffer)
2812 {
2813         struct ring_buffer_per_cpu *cpu_buffer;
2814         unsigned long flags;
2815         int dolock;
2816         int cpu;
2817         int ret;
2818
2819         dolock = rb_ok_to_lock();
2820
2821         /* yes this is racy, but if you don't like the race, lock the buffer */
2822         for_each_buffer_cpu(buffer, cpu) {
2823                 cpu_buffer = buffer->buffers[cpu];
2824                 local_irq_save(flags);
2825                 if (dolock)
2826                         spin_lock(&cpu_buffer->reader_lock);
2827                 ret = rb_per_cpu_empty(cpu_buffer);
2828                 if (dolock)
2829                         spin_unlock(&cpu_buffer->reader_lock);
2830                 local_irq_restore(flags);
2831
2832                 if (!ret)
2833                         return 0;
2834         }
2835
2836         return 1;
2837 }
2838 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2839
2840 /**
2841  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2842  * @buffer: The ring buffer
2843  * @cpu: The CPU buffer to test
2844  */
2845 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2846 {
2847         struct ring_buffer_per_cpu *cpu_buffer;
2848         unsigned long flags;
2849         int dolock;
2850         int ret;
2851
2852         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2853                 return 1;
2854
2855         dolock = rb_ok_to_lock();
2856
2857         cpu_buffer = buffer->buffers[cpu];
2858         local_irq_save(flags);
2859         if (dolock)
2860                 spin_lock(&cpu_buffer->reader_lock);
2861         ret = rb_per_cpu_empty(cpu_buffer);
2862         if (dolock)
2863                 spin_unlock(&cpu_buffer->reader_lock);
2864         local_irq_restore(flags);
2865
2866         return ret;
2867 }
2868 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2869
2870 /**
2871  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2872  * @buffer_a: One buffer to swap with
2873  * @buffer_b: The other buffer to swap with
2874  *
2875  * This function is useful for tracers that want to take a "snapshot"
2876  * of a CPU buffer and have another backup buffer lying around.
2877  * It is expected that the tracer handles the cpu buffer not being
2878  * used at the moment.
2879  */
2880 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2881                          struct ring_buffer *buffer_b, int cpu)
2882 {
2883         struct ring_buffer_per_cpu *cpu_buffer_a;
2884         struct ring_buffer_per_cpu *cpu_buffer_b;
2885         int ret = -EINVAL;
2886
2887         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2888             !cpumask_test_cpu(cpu, buffer_b->cpumask))
2889                 goto out;
2890
2891         /* At least make sure the two buffers are somewhat the same */
2892         if (buffer_a->pages != buffer_b->pages)
2893                 goto out;
2894
2895         ret = -EAGAIN;
2896
2897         if (ring_buffer_flags != RB_BUFFERS_ON)
2898                 goto out;
2899
2900         if (atomic_read(&buffer_a->record_disabled))
2901                 goto out;
2902
2903         if (atomic_read(&buffer_b->record_disabled))
2904                 goto out;
2905
2906         cpu_buffer_a = buffer_a->buffers[cpu];
2907         cpu_buffer_b = buffer_b->buffers[cpu];
2908
2909         if (atomic_read(&cpu_buffer_a->record_disabled))
2910                 goto out;
2911
2912         if (atomic_read(&cpu_buffer_b->record_disabled))
2913                 goto out;
2914
2915         /*
2916          * We can't do a synchronize_sched here because this
2917          * function can be called in atomic context.
2918          * Normally this will be called from the same CPU as cpu.
2919          * If not it's up to the caller to protect this.
2920          */
2921         atomic_inc(&cpu_buffer_a->record_disabled);
2922         atomic_inc(&cpu_buffer_b->record_disabled);
2923
2924         buffer_a->buffers[cpu] = cpu_buffer_b;
2925         buffer_b->buffers[cpu] = cpu_buffer_a;
2926
2927         cpu_buffer_b->buffer = buffer_a;
2928         cpu_buffer_a->buffer = buffer_b;
2929
2930         atomic_dec(&cpu_buffer_a->record_disabled);
2931         atomic_dec(&cpu_buffer_b->record_disabled);
2932
2933         ret = 0;
2934 out:
2935         return ret;
2936 }
2937 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
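
/*
 * Illustrative sketch (hypothetical caller code) of the "snapshot"
 * use case mentioned above: swap every per CPU buffer of a live buffer
 * with a spare buffer of the same size:
 *
 *        for_each_online_cpu(cpu)
 *                ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu);
 */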
2938
2939 /**
2940  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2941  * @buffer: the buffer to allocate for.
2942  *
2943  * This function is used in conjunction with ring_buffer_read_page.
2944  * When reading a full page from the ring buffer, these functions
2945  * can be used to speed up the process. The calling function should
2946  * allocate a few pages first with this function. Then when it
2947  * needs to get pages from the ring buffer, it passes the result
2948  * of this function into ring_buffer_read_page, which will swap
2949  * the page that was allocated, with the read page of the buffer.
2950  *
2951  * Returns:
2952  *  The page allocated, or NULL on error.
2953  */
2954 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2955 {
2956         struct buffer_data_page *bpage;
2957         unsigned long addr;
2958
2959         addr = __get_free_page(GFP_KERNEL);
2960         if (!addr)
2961                 return NULL;
2962
2963         bpage = (void *)addr;
2964
2965         rb_init_page(bpage);
2966
2967         return bpage;
2968 }
2969 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
2970
2971 /**
2972  * ring_buffer_free_read_page - free an allocated read page
2973  * @buffer: the buffer the page was allocated for
2974  * @data: the page to free
2975  *
2976  * Free a page allocated from ring_buffer_alloc_read_page.
2977  */
2978 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2979 {
2980         free_page((unsigned long)data);
2981 }
2982 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2983
2984 /**
2985  * ring_buffer_read_page - extract a page from the ring buffer
2986  * @buffer: buffer to extract from
2987  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2988  * @len: amount to extract
2989  * @cpu: the cpu of the buffer to extract
2990  * @full: should the extraction only happen when the page is full.
2991  *
2992  * This function will pull out a page from the ring buffer and consume it.
2993  * @data_page must be the address of the variable that was returned
2994  * from ring_buffer_alloc_read_page. This is because the page might be used
2995  * to swap with a page in the ring buffer.
2996  *
2997  * for example:
2998  *      rpage = ring_buffer_alloc_read_page(buffer);
2999  *      if (!rpage)
3000  *              return error;
3001  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
3002  *      if (ret >= 0)
3003  *              process_page(rpage, ret);
3004  *
3005  * When @full is set, data is only returned if the whole page can be
3006  * swapped out, i.e. the writer is off the reader page.
3007  *
3008  * Note: it is up to the calling functions to handle sleeps and wakeups.
3009  *  The ring buffer can be used anywhere in the kernel and can not
3010  *  blindly call wake_up. The layer that uses the ring buffer must be
3011  *  responsible for that.
3012  *
3013  * Returns:
3014  *  >=0 if data has been transferred, returns the offset of consumed data.
3015  *  <0 if no data has been transferred.
3016  */
3017 int ring_buffer_read_page(struct ring_buffer *buffer,
3018                           void **data_page, size_t len, int cpu, int full)
3019 {
3020         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3021         struct ring_buffer_event *event;
3022         struct buffer_data_page *bpage;
3023         struct buffer_page *reader;
3024         unsigned long flags;
3025         unsigned int commit;
3026         unsigned int read;
3027         u64 save_timestamp;
3028         int ret = -1;
3029
3030         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3031                 goto out;
3032
3033         /*
3034          * If len is not big enough to hold the page header, then
3035          * we can not copy anything.
3036          */
3037         if (len <= BUF_PAGE_HDR_SIZE)
3038                 goto out;
3039
3040         len -= BUF_PAGE_HDR_SIZE;
3041
3042         if (!data_page)
3043                 goto out;
3044
3045         bpage = *data_page;
3046         if (!bpage)
3047                 goto out;
3048
3049         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3050
3051         reader = rb_get_reader_page(cpu_buffer);
3052         if (!reader)
3053                 goto out_unlock;
3054
3055         event = rb_reader_event(cpu_buffer);
3056
3057         read = reader->read;
3058         commit = rb_page_commit(reader);
3059
3060         /*
3061          * If this page has been partially read or
3062          * if len is not big enough to read the rest of the page or
3063          * a writer is still on the page, then
3064          * we must copy the data from the page to the buffer.
3065          * Otherwise, we can simply swap the page with the one passed in.
3066          */
3067         if (read || (len < (commit - read)) ||
3068             cpu_buffer->reader_page == cpu_buffer->commit_page) {
3069                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3070                 unsigned int rpos = read;
3071                 unsigned int pos = 0;
3072                 unsigned int size;
3073
3074                 if (full)
3075                         goto out_unlock;
3076
3077                 if (len > (commit - read))
3078                         len = (commit - read);
3079
3080                 size = rb_event_length(event);
3081
3082                 if (len < size)
3083                         goto out_unlock;
3084
3085                 /* save the current timestamp, since the user will need it */
3086                 save_timestamp = cpu_buffer->read_stamp;
3087
3088                 /* Need to copy one event at a time */
3089                 do {
3090                         memcpy(bpage->data + pos, rpage->data + rpos, size);
3091
3092                         len -= size;
3093
3094                         rb_advance_reader(cpu_buffer);
3095                         rpos = reader->read;
3096                         pos += size;
3097
3098                         event = rb_reader_event(cpu_buffer);
3099                         size = rb_event_length(event);
3100                 } while (len > size);
3101
3102                 /* update bpage */
3103                 local_set(&bpage->commit, pos);
3104                 bpage->time_stamp = save_timestamp;
3105
3106                 /* we copied everything to the beginning */
3107                 read = 0;
3108         } else {
3109                 /* update the entry counter */
3110                 cpu_buffer->read += local_read(&reader->entries);
3111
3112                 /* swap the pages */
3113                 rb_init_page(bpage);
3114                 bpage = reader->page;
3115                 reader->page = *data_page;
3116                 local_set(&reader->write, 0);
3117                 local_set(&reader->entries, 0);
3118                 reader->read = 0;
3119                 *data_page = bpage;
3120         }
3121         ret = read;
3122
3123  out_unlock:
3124         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3125
3126  out:
3127         return ret;
3128 }
3129 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
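
/*
 * Illustrative sketch of the full read-page cycle, including freeing
 * the spare page; "my_buffer" and "process_page" are assumed
 * caller-side names:
 *
 *        void *page = ring_buffer_alloc_read_page(my_buffer);
 *        int ret;
 *
 *        if (!page)
 *                return -ENOMEM;
 *        ret = ring_buffer_read_page(my_buffer, &page, PAGE_SIZE, cpu, 0);
 *        if (ret >= 0)
 *                process_page(page, ret);
 *        ring_buffer_free_read_page(my_buffer, page);
 */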
3130
3131 #ifdef CONFIG_TRACING
3132 static ssize_t
3133 rb_simple_read(struct file *filp, char __user *ubuf,
3134                size_t cnt, loff_t *ppos)
3135 {
3136         unsigned long *p = filp->private_data;
3137         char buf[64];
3138         int r;
3139
3140         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3141                 r = sprintf(buf, "permanently disabled\n");
3142         else
3143                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3144
3145         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3146 }
3147
3148 static ssize_t
3149 rb_simple_write(struct file *filp, const char __user *ubuf,
3150                 size_t cnt, loff_t *ppos)
3151 {
3152         unsigned long *p = filp->private_data;
3153         char buf[64];
3154         unsigned long val;
3155         int ret;
3156
3157         if (cnt >= sizeof(buf))
3158                 return -EINVAL;
3159
3160         if (copy_from_user(&buf, ubuf, cnt))
3161                 return -EFAULT;
3162
3163         buf[cnt] = 0;
3164
3165         ret = strict_strtoul(buf, 10, &val);
3166         if (ret < 0)
3167                 return ret;
3168
3169         if (val)
3170                 set_bit(RB_BUFFERS_ON_BIT, p);
3171         else
3172                 clear_bit(RB_BUFFERS_ON_BIT, p);
3173
3174         (*ppos)++;
3175
3176         return cnt;
3177 }
3178
3179 static const struct file_operations rb_simple_fops = {
3180         .open           = tracing_open_generic,
3181         .read           = rb_simple_read,
3182         .write          = rb_simple_write,
3183 };
3184
3185
3186 static __init int rb_init_debugfs(void)
3187 {
3188         struct dentry *d_tracer;
3189
3190         d_tracer = tracing_init_dentry();
3191
3192         trace_create_file("tracing_on", 0644, d_tracer,
3193                             &ring_buffer_flags, &rb_simple_fops);
3194
3195         return 0;
3196 }
3197
3198 fs_initcall(rb_init_debugfs);
3199 #endif
3200
3201 #ifdef CONFIG_HOTPLUG_CPU
3202 static int rb_cpu_notify(struct notifier_block *self,
3203                          unsigned long action, void *hcpu)
3204 {
3205         struct ring_buffer *buffer =
3206                 container_of(self, struct ring_buffer, cpu_notify);
3207         long cpu = (long)hcpu;
3208
3209         switch (action) {
3210         case CPU_UP_PREPARE:
3211         case CPU_UP_PREPARE_FROZEN:
3212                 if (cpumask_test_cpu(cpu, buffer->cpumask))
3213                         return NOTIFY_OK;
3214
3215                 buffer->buffers[cpu] =
3216                         rb_allocate_cpu_buffer(buffer, cpu);
3217                 if (!buffer->buffers[cpu]) {
3218                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3219                              cpu);
3220                         return NOTIFY_OK;
3221                 }
3222                 smp_wmb();
3223                 cpumask_set_cpu(cpu, buffer->cpumask);
3224                 break;
3225         case CPU_DOWN_PREPARE:
3226         case CPU_DOWN_PREPARE_FROZEN:
3227                 /*
3228                  * Do nothing.
3229                  *  If we were to free the buffer, then the user would
3230                  *  lose any trace that was in the buffer.
3231                  */
3232                 break;
3233         default:
3234                 break;
3235         }
3236         return NOTIFY_OK;
3237 }
3238 #endif