ring-buffer: no preempt for sched_clock()
kernel/trace/ring_buffer.c [safe/jmp/linux-2.6]
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /* Global flag to disable all recording to ring buffers */
22 static int ring_buffers_off __read_mostly;
23
24 /**
25  * tracing_on - enable all tracing buffers
26  *
27  * This function enables all tracing buffers that may have been
28  * disabled with tracing_off.
29  */
30 void tracing_on(void)
31 {
32         ring_buffers_off = 0;
33 }
34
35 /**
36  * tracing_off - turn off all tracing buffers
37  *
38  * This function stops all tracing buffers from recording data.
39  * It does not disable any overhead the tracers themselves may
40  * be causing. This function simply causes all recording to
41  * the ring buffers to fail.
42  */
43 void tracing_off(void)
44 {
45         ring_buffers_off = 1;
46 }
47
48 /* Up this if you want to test the TIME_EXTENTS and normalization */
49 #define DEBUG_SHIFT 0
50
51 /* FIXME!!! */
52 u64 ring_buffer_time_stamp(int cpu)
53 {
54         u64 time;
55
56         preempt_disable_notrace();
57         /* shift to debug/test normalization and TIME_EXTENTS */
58         time = sched_clock() << DEBUG_SHIFT;
59         preempt_enable_notrace();
60
61         return time;
62 }
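/*
 * Why the preempt_disable_notrace() pair above: sched_clock() is a per-CPU
 * clock and, at least on some configurations, must be called with
 * preemption disabled so the caller cannot migrate between CPUs mid-read;
 * that is the point of the "no preempt for sched_clock()" change this file
 * carries. The _notrace variants keep the ring buffer from recursing into
 * the tracer while taking a time stamp.
 */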
63
64 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
65 {
66         /* Just stupid testing the normalize function and deltas */
67         *ts >>= DEBUG_SHIFT;
68 }
69
70 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71 #define RB_ALIGNMENT_SHIFT      2
72 #define RB_ALIGNMENT            (1 << RB_ALIGNMENT_SHIFT)
73 #define RB_MAX_SMALL_DATA       28
74
75 enum {
76         RB_LEN_TIME_EXTEND = 8,
77         RB_LEN_TIME_STAMP = 16,
78 };
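/*
 * How event sizes are encoded (a sketch following rb_update_event() and
 * rb_event_length() below): a data payload of up to RB_MAX_SMALL_DATA (28)
 * bytes stores its length directly in the event's 'len' field, counted in
 * RB_ALIGNMENT (4) byte units, i.e. len = length >> RB_ALIGNMENT_SHIFT.
 * Larger payloads set len to 0, put the byte length in array[0] and start
 * the data at array[1]. Time-extend and time-stamp events use the fixed
 * lengths in the enum above.
 */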
79
80 /* inline for ring buffer fast paths */
81 static inline unsigned
82 rb_event_length(struct ring_buffer_event *event)
83 {
84         unsigned length;
85
86         switch (event->type) {
87         case RINGBUF_TYPE_PADDING:
88                 /* undefined */
89                 return -1;
90
91         case RINGBUF_TYPE_TIME_EXTEND:
92                 return RB_LEN_TIME_EXTEND;
93
94         case RINGBUF_TYPE_TIME_STAMP:
95                 return RB_LEN_TIME_STAMP;
96
97         case RINGBUF_TYPE_DATA:
98                 if (event->len)
99                         length = event->len << RB_ALIGNMENT_SHIFT;
100                 else
101                         length = event->array[0];
102                 return length + RB_EVNT_HDR_SIZE;
103         default:
104                 BUG();
105         }
106         /* not hit */
107         return 0;
108 }
109
110 /**
111  * ring_buffer_event_length - return the length of the event
112  * @event: the event to get the length of
113  */
114 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
115 {
116         return rb_event_length(event);
117 }
118
119 /* inline for ring buffer fast paths */
120 static inline void *
121 rb_event_data(struct ring_buffer_event *event)
122 {
123         BUG_ON(event->type != RINGBUF_TYPE_DATA);
124         /* If length is in len field, then array[0] has the data */
125         if (event->len)
126                 return (void *)&event->array[0];
127         /* Otherwise length is in array[0] and array[1] has the data */
128         return (void *)&event->array[1];
129 }
130
131 /**
132  * ring_buffer_event_data - return the data of the event
133  * @event: the event to get the data from
134  */
135 void *ring_buffer_event_data(struct ring_buffer_event *event)
136 {
137         return rb_event_data(event);
138 }
139
140 #define for_each_buffer_cpu(buffer, cpu)                \
141         for_each_cpu_mask(cpu, buffer->cpumask)
142
143 #define TS_SHIFT        27
144 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
145 #define TS_DELTA_TEST   (~TS_MASK)
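/*
 * Deltas between events must fit in TS_SHIFT (27) bits. For example a
 * delta of 0x07ffffff still fits, while 0x08000000 intersects
 * TS_DELTA_TEST, so test_time_stamp() reports it and rb_add_time_stamp()
 * below inserts a RINGBUF_TYPE_TIME_EXTEND event that carries the upper
 * bits of the delta in array[0].
 */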
146
147 /*
148  * This hack is stolen from mm/slob.c.
149  * We can store per page timing information in the page frame of the page.
150  * Thanks to Peter Zijlstra for suggesting this idea.
151  */
152 struct buffer_page {
153         u64              time_stamp;    /* page time stamp */
154         local_t          write;         /* index for next write */
155         local_t          commit;         /* write committed index */
156         unsigned         read;          /* index for next read */
157         struct list_head list;          /* list of free pages */
158         void *page;                     /* Actual data page */
159 };
160
161 /*
162  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
163  * this issue out.
164  */
165 static inline void free_buffer_page(struct buffer_page *bpage)
166 {
167         if (bpage->page)
168                 free_page((unsigned long)bpage->page);
169         kfree(bpage);
170 }
171
172 /*
173  * We need to fit the time_stamp delta into 27 bits.
174  */
175 static inline int test_time_stamp(u64 delta)
176 {
177         if (delta & TS_DELTA_TEST)
178                 return 1;
179         return 0;
180 }
181
182 #define BUF_PAGE_SIZE PAGE_SIZE
183
184 /*
185  * If head_page == tail_page && head == tail, then the buffer is empty.
186  */
187 struct ring_buffer_per_cpu {
188         int                             cpu;
189         struct ring_buffer              *buffer;
190         spinlock_t                      lock;
191         struct lock_class_key           lock_key;
192         struct list_head                pages;
193         struct buffer_page              *head_page;     /* read from head */
194         struct buffer_page              *tail_page;     /* write to tail */
195         struct buffer_page              *commit_page;   /* committed pages */
196         struct buffer_page              *reader_page;
197         unsigned long                   overrun;
198         unsigned long                   entries;
199         u64                             write_stamp;
200         u64                             read_stamp;
201         atomic_t                        record_disabled;
202 };
203
204 struct ring_buffer {
205         unsigned long                   size;
206         unsigned                        pages;
207         unsigned                        flags;
208         int                             cpus;
209         cpumask_t                       cpumask;
210         atomic_t                        record_disabled;
211
212         struct mutex                    mutex;
213
214         struct ring_buffer_per_cpu      **buffers;
215 };
216
217 struct ring_buffer_iter {
218         struct ring_buffer_per_cpu      *cpu_buffer;
219         unsigned long                   head;
220         struct buffer_page              *head_page;
221         u64                             read_stamp;
222 };
223
224 #define RB_WARN_ON(buffer, cond)                                \
225         do {                                                    \
226                 if (unlikely(cond)) {                           \
227                         atomic_inc(&buffer->record_disabled);   \
228                         WARN_ON(1);                             \
229                 }                                               \
230         } while (0)
231
232 #define RB_WARN_ON_RET(buffer, cond)                            \
233         do {                                                    \
234                 if (unlikely(cond)) {                           \
235                         atomic_inc(&buffer->record_disabled);   \
236                         WARN_ON(1);                             \
237                         return -1;                              \
238                 }                                               \
239         } while (0)
240
241 #define RB_WARN_ON_ONCE(buffer, cond)                           \
242         do {                                                    \
243                 static int once;                                \
244                 if (unlikely(cond) && !once) {                  \
245                         once++;                                 \
246                         atomic_inc(&buffer->record_disabled);   \
247                         WARN_ON(1);                             \
248                 }                                               \
249         } while (0)
250
251 /**
252  * check_pages - integrity check of buffer pages
253  * @cpu_buffer: CPU buffer with pages to test
254  *
255  * As a safty measure we check to make sure the data pages have not
256  * been corrupted.
257  */
258 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
259 {
260         struct list_head *head = &cpu_buffer->pages;
261         struct buffer_page *page, *tmp;
262
263         RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
264         RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
265
266         list_for_each_entry_safe(page, tmp, head, list) {
267                 RB_WARN_ON_RET(cpu_buffer,
268                                page->list.next->prev != &page->list);
269                 RB_WARN_ON_RET(cpu_buffer,
270                                page->list.prev->next != &page->list);
271         }
272
273         return 0;
274 }
275
276 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
277                              unsigned nr_pages)
278 {
279         struct list_head *head = &cpu_buffer->pages;
280         struct buffer_page *page, *tmp;
281         unsigned long addr;
282         LIST_HEAD(pages);
283         unsigned i;
284
285         for (i = 0; i < nr_pages; i++) {
286                 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
287                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
288                 if (!page)
289                         goto free_pages;
290                 list_add(&page->list, &pages);
291
292                 addr = __get_free_page(GFP_KERNEL);
293                 if (!addr)
294                         goto free_pages;
295                 page->page = (void *)addr;
296         }
297
298         list_splice(&pages, head);
299
300         rb_check_pages(cpu_buffer);
301
302         return 0;
303
304  free_pages:
305         list_for_each_entry_safe(page, tmp, &pages, list) {
306                 list_del_init(&page->list);
307                 free_buffer_page(page);
308         }
309         return -ENOMEM;
310 }
311
312 static struct ring_buffer_per_cpu *
313 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
314 {
315         struct ring_buffer_per_cpu *cpu_buffer;
316         struct buffer_page *page;
317         unsigned long addr;
318         int ret;
319
320         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
321                                   GFP_KERNEL, cpu_to_node(cpu));
322         if (!cpu_buffer)
323                 return NULL;
324
325         cpu_buffer->cpu = cpu;
326         cpu_buffer->buffer = buffer;
327         spin_lock_init(&cpu_buffer->lock);
328         INIT_LIST_HEAD(&cpu_buffer->pages);
329
330         page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
331                             GFP_KERNEL, cpu_to_node(cpu));
332         if (!page)
333                 goto fail_free_buffer;
334
335         cpu_buffer->reader_page = page;
336         addr = __get_free_page(GFP_KERNEL);
337         if (!addr)
338                 goto fail_free_reader;
339         page->page = (void *)addr;
340
341         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
342
343         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
344         if (ret < 0)
345                 goto fail_free_reader;
346
347         cpu_buffer->head_page
348                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
349         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
350
351         return cpu_buffer;
352
353  fail_free_reader:
354         free_buffer_page(cpu_buffer->reader_page);
355
356  fail_free_buffer:
357         kfree(cpu_buffer);
358         return NULL;
359 }
360
361 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
362 {
363         struct list_head *head = &cpu_buffer->pages;
364         struct buffer_page *page, *tmp;
365
366         list_del_init(&cpu_buffer->reader_page->list);
367         free_buffer_page(cpu_buffer->reader_page);
368
369         list_for_each_entry_safe(page, tmp, head, list) {
370                 list_del_init(&page->list);
371                 free_buffer_page(page);
372         }
373         kfree(cpu_buffer);
374 }
375
376 /*
377  * Causes compile errors if the struct buffer_page gets bigger
378  * than the struct page.
379  */
380 extern int ring_buffer_page_too_big(void);
381
382 /**
383  * ring_buffer_alloc - allocate a new ring_buffer
384  * @size: the size in bytes that is needed.
385  * @flags: attributes to set for the ring buffer.
386  *
387  * Currently the only flag that is available is the RB_FL_OVERWRITE
388  * flag. This flag means that the buffer will overwrite old data
389  * when the buffer wraps. If this flag is not set, the buffer will
390  * drop data when the tail hits the head.
391  */
392 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
393 {
394         struct ring_buffer *buffer;
395         int bsize;
396         int cpu;
397
398         /* Paranoid! Optimizes out when all is well */
399         if (sizeof(struct buffer_page) > sizeof(struct page))
400                 ring_buffer_page_too_big();
401
402
403         /* keep it in its own cache line */
404         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
405                          GFP_KERNEL);
406         if (!buffer)
407                 return NULL;
408
409         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
410         buffer->flags = flags;
411
412         /* need at least two pages */
413         if (buffer->pages == 1)
414                 buffer->pages++;
415
416         buffer->cpumask = cpu_possible_map;
417         buffer->cpus = nr_cpu_ids;
418
419         bsize = sizeof(void *) * nr_cpu_ids;
420         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
421                                   GFP_KERNEL);
422         if (!buffer->buffers)
423                 goto fail_free_buffer;
424
425         for_each_buffer_cpu(buffer, cpu) {
426                 buffer->buffers[cpu] =
427                         rb_allocate_cpu_buffer(buffer, cpu);
428                 if (!buffer->buffers[cpu])
429                         goto fail_free_buffers;
430         }
431
432         mutex_init(&buffer->mutex);
433
434         return buffer;
435
436  fail_free_buffers:
437         for_each_buffer_cpu(buffer, cpu) {
438                 if (buffer->buffers[cpu])
439                         rb_free_cpu_buffer(buffer->buffers[cpu]);
440         }
441         kfree(buffer->buffers);
442
443  fail_free_buffer:
444         kfree(buffer);
445         return NULL;
446 }
447
448 /**
449  * ring_buffer_free - free a ring buffer.
450  * @buffer: the buffer to free.
451  */
452 void
453 ring_buffer_free(struct ring_buffer *buffer)
454 {
455         int cpu;
456
457         for_each_buffer_cpu(buffer, cpu)
458                 rb_free_cpu_buffer(buffer->buffers[cpu]);
459
460         kfree(buffer);
461 }
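/*
 * Allocation usage sketch (illustrative only; the one-megabyte size is an
 * arbitrary example, not a recommendation from this file):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */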
462
463 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
464
465 static void
466 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
467 {
468         struct buffer_page *page;
469         struct list_head *p;
470         unsigned i;
471
472         atomic_inc(&cpu_buffer->record_disabled);
473         synchronize_sched();
474
475         for (i = 0; i < nr_pages; i++) {
476                 BUG_ON(list_empty(&cpu_buffer->pages));
477                 p = cpu_buffer->pages.next;
478                 page = list_entry(p, struct buffer_page, list);
479                 list_del_init(&page->list);
480                 free_buffer_page(page);
481         }
482         BUG_ON(list_empty(&cpu_buffer->pages));
483
484         rb_reset_cpu(cpu_buffer);
485
486         rb_check_pages(cpu_buffer);
487
488         atomic_dec(&cpu_buffer->record_disabled);
489
490 }
491
492 static void
493 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494                 struct list_head *pages, unsigned nr_pages)
495 {
496         struct buffer_page *page;
497         struct list_head *p;
498         unsigned i;
499
500         atomic_inc(&cpu_buffer->record_disabled);
501         synchronize_sched();
502
503         for (i = 0; i < nr_pages; i++) {
504                 BUG_ON(list_empty(pages));
505                 p = pages->next;
506                 page = list_entry(p, struct buffer_page, list);
507                 list_del_init(&page->list);
508                 list_add_tail(&page->list, &cpu_buffer->pages);
509         }
510         rb_reset_cpu(cpu_buffer);
511
512         rb_check_pages(cpu_buffer);
513
514         atomic_dec(&cpu_buffer->record_disabled);
515 }
516
517 /**
518  * ring_buffer_resize - resize the ring buffer
519  * @buffer: the buffer to resize.
520  * @size: the new size.
521  *
522  * The tracer is responsible for making sure that the buffer is
523  * not being used while changing the size.
524  * Note: We may be able to change the above requirement by using
525  *  RCU synchronizations.
526  *
527  * Minimum size is 2 * BUF_PAGE_SIZE.
528  *
529  * Returns -ENOMEM on failure.
530  */
531 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
532 {
533         struct ring_buffer_per_cpu *cpu_buffer;
534         unsigned nr_pages, rm_pages, new_pages;
535         struct buffer_page *page, *tmp;
536         unsigned long buffer_size;
537         unsigned long addr;
538         LIST_HEAD(pages);
539         int i, cpu;
540
541         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
542         size *= BUF_PAGE_SIZE;
543         buffer_size = buffer->pages * BUF_PAGE_SIZE;
544
545         /* we need a minimum of two pages */
546         if (size < BUF_PAGE_SIZE * 2)
547                 size = BUF_PAGE_SIZE * 2;
548
549         if (size == buffer_size)
550                 return size;
551
552         mutex_lock(&buffer->mutex);
553
554         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
555
556         if (size < buffer_size) {
557
558                 /* easy case, just free pages */
559                 BUG_ON(nr_pages >= buffer->pages);
560
561                 rm_pages = buffer->pages - nr_pages;
562
563                 for_each_buffer_cpu(buffer, cpu) {
564                         cpu_buffer = buffer->buffers[cpu];
565                         rb_remove_pages(cpu_buffer, rm_pages);
566                 }
567                 goto out;
568         }
569
570         /*
571          * This is a bit more difficult. We only want to add pages
572          * when we can allocate enough for all CPUs. We do this
573          * by allocating all the pages and storing them on a local
574          * link list. If we succeed in our allocation, then we
575          * add these pages to the cpu_buffers. Otherwise we just free
576          * them all and return -ENOMEM;
577          */
578         BUG_ON(nr_pages <= buffer->pages);
579         new_pages = nr_pages - buffer->pages;
580
581         for_each_buffer_cpu(buffer, cpu) {
582                 for (i = 0; i < new_pages; i++) {
583                         page = kzalloc_node(ALIGN(sizeof(*page),
584                                                   cache_line_size()),
585                                             GFP_KERNEL, cpu_to_node(cpu));
586                         if (!page)
587                                 goto free_pages;
588                         list_add(&page->list, &pages);
589                         addr = __get_free_page(GFP_KERNEL);
590                         if (!addr)
591                                 goto free_pages;
592                         page->page = (void *)addr;
593                 }
594         }
595
596         for_each_buffer_cpu(buffer, cpu) {
597                 cpu_buffer = buffer->buffers[cpu];
598                 rb_insert_pages(cpu_buffer, &pages, new_pages);
599         }
600
601         BUG_ON(!list_empty(&pages));
602
603  out:
604         buffer->pages = nr_pages;
605         mutex_unlock(&buffer->mutex);
606
607         return size;
608
609  free_pages:
610         list_for_each_entry_safe(page, tmp, &pages, list) {
611                 list_del_init(&page->list);
612                 free_buffer_page(page);
613         }
614         return -ENOMEM;
615 }
616
617 static inline int rb_null_event(struct ring_buffer_event *event)
618 {
619         return event->type == RINGBUF_TYPE_PADDING;
620 }
621
622 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
623 {
624         return page->page + index;
625 }
626
627 static inline struct ring_buffer_event *
628 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
629 {
630         return __rb_page_index(cpu_buffer->reader_page,
631                                cpu_buffer->reader_page->read);
632 }
633
634 static inline struct ring_buffer_event *
635 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
636 {
637         return __rb_page_index(cpu_buffer->head_page,
638                                cpu_buffer->head_page->read);
639 }
640
641 static inline struct ring_buffer_event *
642 rb_iter_head_event(struct ring_buffer_iter *iter)
643 {
644         return __rb_page_index(iter->head_page, iter->head);
645 }
646
647 static inline unsigned rb_page_write(struct buffer_page *bpage)
648 {
649         return local_read(&bpage->write);
650 }
651
652 static inline unsigned rb_page_commit(struct buffer_page *bpage)
653 {
654         return local_read(&bpage->commit);
655 }
656
657 /* Size is determined by what has been committed */
658 static inline unsigned rb_page_size(struct buffer_page *bpage)
659 {
660         return rb_page_commit(bpage);
661 }
662
663 static inline unsigned
664 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
665 {
666         return rb_page_commit(cpu_buffer->commit_page);
667 }
668
669 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
670 {
671         return rb_page_commit(cpu_buffer->head_page);
672 }
673
674 /*
675  * When the tail hits the head and the buffer is in overwrite mode,
676  * the head jumps to the next page and all content on the previous
677  * page is discarded. But before doing so, we update the overrun
678  * variable of the buffer.
679  */
680 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
681 {
682         struct ring_buffer_event *event;
683         unsigned long head;
684
685         for (head = 0; head < rb_head_size(cpu_buffer);
686              head += rb_event_length(event)) {
687
688                 event = __rb_page_index(cpu_buffer->head_page, head);
689                 BUG_ON(rb_null_event(event));
690                 /* Only count data entries */
691                 if (event->type != RINGBUF_TYPE_DATA)
692                         continue;
693                 cpu_buffer->overrun++;
694                 cpu_buffer->entries--;
695         }
696 }
697
698 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
699                                struct buffer_page **page)
700 {
701         struct list_head *p = (*page)->list.next;
702
703         if (p == &cpu_buffer->pages)
704                 p = p->next;
705
706         *page = list_entry(p, struct buffer_page, list);
707 }
708
709 static inline unsigned
710 rb_event_index(struct ring_buffer_event *event)
711 {
712         unsigned long addr = (unsigned long)event;
713
714         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
715 }
716
717 static inline int
718 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
719              struct ring_buffer_event *event)
720 {
721         unsigned long addr = (unsigned long)event;
722         unsigned long index;
723
724         index = rb_event_index(event);
725         addr &= PAGE_MASK;
726
727         return cpu_buffer->commit_page->page == (void *)addr &&
728                 rb_commit_index(cpu_buffer) == index;
729 }
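/*
 * "Owning the commit", as tested by rb_is_commit() above: an event owns
 * the commit if it sits exactly at the commit page and commit index,
 * which means it is the outermost write on this CPU rather than one
 * nested inside it from an interrupt or NMI. Only the owner moves the
 * commit index and write_stamp forward; nested writers leave that to the
 * writer they interrupted (see rb_set_commit_to_write() below).
 */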
730
731 static inline void
732 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
733                     struct ring_buffer_event *event)
734 {
735         unsigned long addr = (unsigned long)event;
736         unsigned long index;
737
738         index = rb_event_index(event);
739         addr &= PAGE_MASK;
740
741         while (cpu_buffer->commit_page->page != (void *)addr) {
742                 RB_WARN_ON(cpu_buffer,
743                            cpu_buffer->commit_page == cpu_buffer->tail_page);
744                 cpu_buffer->commit_page->commit =
745                         cpu_buffer->commit_page->write;
746                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
747                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
748         }
749
750         /* Now set the commit to the event's index */
751         local_set(&cpu_buffer->commit_page->commit, index);
752 }
753
754 static inline void
755 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
756 {
757         /*
758          * We only race with interrupts and NMIs on this CPU.
759          * If we own the commit event, then we can commit
760          * all others that interrupted us, since the interruptions
761          * are in stack format (they finish before they come
762          * back to us). This allows us to do a simple loop to
763          * assign the commit to the tail.
764          */
765         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
766                 cpu_buffer->commit_page->commit =
767                         cpu_buffer->commit_page->write;
768                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
769                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
770                 /* add barrier to keep gcc from optimizing too much */
771                 barrier();
772         }
773         while (rb_commit_index(cpu_buffer) !=
774                rb_page_write(cpu_buffer->commit_page)) {
775                 cpu_buffer->commit_page->commit =
776                         cpu_buffer->commit_page->write;
777                 barrier();
778         }
779 }
780
781 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
782 {
783         cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
784         cpu_buffer->reader_page->read = 0;
785 }
786
787 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
788 {
789         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
790
791         /*
792          * The iterator could be on the reader page (it starts there).
793          * But the head could have moved, since the reader was
794          * found. Check for this case and assign the iterator
795          * to the head page instead of next.
796          */
797         if (iter->head_page == cpu_buffer->reader_page)
798                 iter->head_page = cpu_buffer->head_page;
799         else
800                 rb_inc_page(cpu_buffer, &iter->head_page);
801
802         iter->read_stamp = iter->head_page->time_stamp;
803         iter->head = 0;
804 }
805
806 /**
807  * rb_update_event - update event type and data
808  * @event: the event to update
809  * @type: the type of event
810  * @length: the size of the event field in the ring buffer
811  *
812  * Update the type and data fields of the event. The length
813  * is the actual size that is written to the ring buffer,
814  * and with this, we can determine what to place into the
815  * data field.
816  */
817 static inline void
818 rb_update_event(struct ring_buffer_event *event,
819                          unsigned type, unsigned length)
820 {
821         event->type = type;
822
823         switch (type) {
824
825         case RINGBUF_TYPE_PADDING:
826                 break;
827
828         case RINGBUF_TYPE_TIME_EXTEND:
829                 event->len =
830                         (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
831                         >> RB_ALIGNMENT_SHIFT;
832                 break;
833
834         case RINGBUF_TYPE_TIME_STAMP:
835                 event->len =
836                         (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
837                         >> RB_ALIGNMENT_SHIFT;
838                 break;
839
840         case RINGBUF_TYPE_DATA:
841                 length -= RB_EVNT_HDR_SIZE;
842                 if (length > RB_MAX_SMALL_DATA) {
843                         event->len = 0;
844                         event->array[0] = length;
845                 } else
846                         event->len =
847                                 (length + (RB_ALIGNMENT-1))
848                                 >> RB_ALIGNMENT_SHIFT;
849                 break;
850         default:
851                 BUG();
852         }
853 }
854
855 static inline unsigned rb_calculate_event_length(unsigned length)
856 {
857         struct ring_buffer_event event; /* Used only for sizeof array */
858
859         /* zero length can cause confusion */
860         if (!length)
861                 length = 1;
862
863         if (length > RB_MAX_SMALL_DATA)
864                 length += sizeof(event.array[0]);
865
866         length += RB_EVNT_HDR_SIZE;
867         length = ALIGN(length, RB_ALIGNMENT);
868
869         return length;
870 }
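/*
 * Worked example for the calculation above, assuming the usual 4-byte
 * event header (RB_EVNT_HDR_SIZE): a request for 13 bytes of data is not
 * larger than RB_MAX_SMALL_DATA, so only the header is added (13 + 4 = 17)
 * and the result is aligned up to 20 bytes. rb_update_event() then stores
 * (20 - 4) >> RB_ALIGNMENT_SHIFT = 4 in the len field, and
 * rb_event_length() recovers (4 << 2) + 4 = 20.
 */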
871
872 static struct ring_buffer_event *
873 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
874                   unsigned type, unsigned long length, u64 *ts)
875 {
876         struct buffer_page *tail_page, *head_page, *reader_page;
877         unsigned long tail, write;
878         struct ring_buffer *buffer = cpu_buffer->buffer;
879         struct ring_buffer_event *event;
880         unsigned long flags;
881
882         tail_page = cpu_buffer->tail_page;
883         write = local_add_return(length, &tail_page->write);
884         tail = write - length;
885
886         /* See if we shot past the end of this buffer page */
887         if (write > BUF_PAGE_SIZE) {
888                 struct buffer_page *next_page = tail_page;
889
890                 spin_lock_irqsave(&cpu_buffer->lock, flags);
891
892                 rb_inc_page(cpu_buffer, &next_page);
893
894                 head_page = cpu_buffer->head_page;
895                 reader_page = cpu_buffer->reader_page;
896
897                 /* we grabbed the lock before incrementing */
898                 RB_WARN_ON(cpu_buffer, next_page == reader_page);
899
900                 /*
901                  * If for some reason, we had an interrupt storm that made
902                  * it all the way around the buffer, bail, and warn
903                  * about it.
904                  */
905                 if (unlikely(next_page == cpu_buffer->commit_page)) {
906                         WARN_ON_ONCE(1);
907                         goto out_unlock;
908                 }
909
910                 if (next_page == head_page) {
911                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
912                                 /* reset write */
913                                 if (tail <= BUF_PAGE_SIZE)
914                                         local_set(&tail_page->write, tail);
915                                 goto out_unlock;
916                         }
917
918                         /* tail_page has not moved yet? */
919                         if (tail_page == cpu_buffer->tail_page) {
920                                 /* count overflows */
921                                 rb_update_overflow(cpu_buffer);
922
923                                 rb_inc_page(cpu_buffer, &head_page);
924                                 cpu_buffer->head_page = head_page;
925                                 cpu_buffer->head_page->read = 0;
926                         }
927                 }
928
929                 /*
930                  * If the tail page is still the same as what we think
931                  * it is, then it is up to us to update the tail
932                  * pointer.
933                  */
934                 if (tail_page == cpu_buffer->tail_page) {
935                         local_set(&next_page->write, 0);
936                         local_set(&next_page->commit, 0);
937                         cpu_buffer->tail_page = next_page;
938
939                         /* reread the time stamp */
940                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
941                         cpu_buffer->tail_page->time_stamp = *ts;
942                 }
943
944                 /*
945                  * The actual tail page has moved forward.
946                  */
947                 if (tail < BUF_PAGE_SIZE) {
948                         /* Mark the rest of the page with padding */
949                         event = __rb_page_index(tail_page, tail);
950                         event->type = RINGBUF_TYPE_PADDING;
951                 }
952
953                 if (tail <= BUF_PAGE_SIZE)
954                         /* Set the write back to the previous setting */
955                         local_set(&tail_page->write, tail);
956
957                 /*
958                  * If this was a commit entry that failed,
959                  * increment that too
960                  */
961                 if (tail_page == cpu_buffer->commit_page &&
962                     tail == rb_commit_index(cpu_buffer)) {
963                         rb_set_commit_to_write(cpu_buffer);
964                 }
965
966                 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
967
968                 /* fail and let the caller try again */
969                 return ERR_PTR(-EAGAIN);
970         }
971
972         /* We reserved something on the buffer */
973
974         BUG_ON(write > BUF_PAGE_SIZE);
975
976         event = __rb_page_index(tail_page, tail);
977         rb_update_event(event, type, length);
978
979         /*
980          * If this is a commit and the tail is zero, then update
981          * this page's time stamp.
982          */
983         if (!tail && rb_is_commit(cpu_buffer, event))
984                 cpu_buffer->commit_page->time_stamp = *ts;
985
986         return event;
987
988  out_unlock:
989         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
990         return NULL;
991 }
992
993 static int
994 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
995                   u64 *ts, u64 *delta)
996 {
997         struct ring_buffer_event *event;
998         static int once;
999         int ret;
1000
1001         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1002                 printk(KERN_WARNING "Delta way too big! %llu"
1003                        " ts=%llu write stamp = %llu\n",
1004                        (unsigned long long)*delta,
1005                        (unsigned long long)*ts,
1006                        (unsigned long long)cpu_buffer->write_stamp);
1007                 WARN_ON(1);
1008         }
1009
1010         /*
1011          * The delta is too big; we need to add a
1012          * new timestamp.
1013          */
1014         event = __rb_reserve_next(cpu_buffer,
1015                                   RINGBUF_TYPE_TIME_EXTEND,
1016                                   RB_LEN_TIME_EXTEND,
1017                                   ts);
1018         if (!event)
1019                 return -EBUSY;
1020
1021         if (PTR_ERR(event) == -EAGAIN)
1022                 return -EAGAIN;
1023
1024         /* Only a committed time event can update the write stamp */
1025         if (rb_is_commit(cpu_buffer, event)) {
1026                 /*
1027                  * If this is the first on the page, then we need to
1028                  * update the page itself, and just put in a zero.
1029                  */
1030                 if (rb_event_index(event)) {
1031                         event->time_delta = *delta & TS_MASK;
1032                         event->array[0] = *delta >> TS_SHIFT;
1033                 } else {
1034                         cpu_buffer->commit_page->time_stamp = *ts;
1035                         event->time_delta = 0;
1036                         event->array[0] = 0;
1037                 }
1038                 cpu_buffer->write_stamp = *ts;
1039                 /* let the caller know this was the commit */
1040                 ret = 1;
1041         } else {
1042                 /* Darn, this is just wasted space */
1043                 event->time_delta = 0;
1044                 event->array[0] = 0;
1045                 ret = 0;
1046         }
1047
1048         *delta = 0;
1049
1050         return ret;
1051 }
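/*
 * Example of the split done above (values are illustrative): for a delta
 * of 0x12345678 clock units, the low 27 bits (delta & TS_MASK) land in the
 * event's time_delta field and the upper bits (delta >> TS_SHIFT, here 0x2)
 * land in array[0]. Readers reassemble the full delta in
 * rb_update_read_stamp() and rb_update_iter_read_stamp() further down.
 */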
1052
1053 static struct ring_buffer_event *
1054 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1055                       unsigned type, unsigned long length)
1056 {
1057         struct ring_buffer_event *event;
1058         u64 ts, delta;
1059         int commit = 0;
1060         int nr_loops = 0;
1061
1062  again:
1063         /*
1064          * We allow for interrupts to reenter here and do a trace.
1065          * If one does, it will cause this original code to loop
1066          * back here. Even with heavy interrupts happening, this
1067          * should only happen a few times in a row. If this happens
1068          * 1000 times in a row, there must be either an interrupt
1069          * storm or we have something buggy.
1070          * Bail!
1071          */
1072         if (unlikely(++nr_loops > 1000)) {
1073                 RB_WARN_ON(cpu_buffer, 1);
1074                 return NULL;
1075         }
1076
1077         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1078
1079         /*
1080          * Only the first commit can update the timestamp.
1081          * Yes there is a race here. If an interrupt comes in
1082          * just after the conditional and it traces too, then it
1083          * will also check the deltas. More than one timestamp may
1084          * also be made. But only the entry that did the actual
1085          * commit will be something other than zero.
1086          */
1087         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1088             rb_page_write(cpu_buffer->tail_page) ==
1089             rb_commit_index(cpu_buffer)) {
1090
1091                 delta = ts - cpu_buffer->write_stamp;
1092
1093                 /* make sure this delta is calculated here */
1094                 barrier();
1095
1096                 /* Did the write stamp get updated already? */
1097                 if (unlikely(ts < cpu_buffer->write_stamp))
1098                         delta = 0;
1099
1100                 if (test_time_stamp(delta)) {
1101
1102                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1103
1104                         if (commit == -EBUSY)
1105                                 return NULL;
1106
1107                         if (commit == -EAGAIN)
1108                                 goto again;
1109
1110                         RB_WARN_ON(cpu_buffer, commit < 0);
1111                 }
1112         } else
1113                 /* Non commits have zero deltas */
1114                 delta = 0;
1115
1116         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1117         if (PTR_ERR(event) == -EAGAIN)
1118                 goto again;
1119
1120         if (!event) {
1121                 if (unlikely(commit))
1122                         /*
1123                          * Ouch! We needed a timestamp and it was committed. But
1124                          * we didn't get our event reserved.
1125                          */
1126                         rb_set_commit_to_write(cpu_buffer);
1127                 return NULL;
1128         }
1129
1130         /*
1131          * If the timestamp was committed, make the commit our entry
1132          * now so that we will update it when needed.
1133          */
1134         if (commit)
1135                 rb_set_commit_event(cpu_buffer, event);
1136         else if (!rb_is_commit(cpu_buffer, event))
1137                 delta = 0;
1138
1139         event->time_delta = delta;
1140
1141         return event;
1142 }
1143
1144 static DEFINE_PER_CPU(int, rb_need_resched);
1145
1146 /**
1147  * ring_buffer_lock_reserve - reserve a part of the buffer
1148  * @buffer: the ring buffer to reserve from
1149  * @length: the length of the data to reserve (excluding event header)
1150  * @flags: a pointer to save the interrupt flags
1151  *
1152  * Returns a reserved event on the ring buffer to copy directly to.
1153  * The user of this interface will need to get the body to write into
1154  * and can use the ring_buffer_event_data() interface.
1155  *
1156  * The length is the length of the data needed, not the event length
1157  * which also includes the event header.
1158  *
1159  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1160  * If NULL is returned, then nothing has been allocated or locked.
1161  */
1162 struct ring_buffer_event *
1163 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1164                          unsigned long length,
1165                          unsigned long *flags)
1166 {
1167         struct ring_buffer_per_cpu *cpu_buffer;
1168         struct ring_buffer_event *event;
1169         int cpu, resched;
1170
1171         if (ring_buffers_off)
1172                 return NULL;
1173
1174         if (atomic_read(&buffer->record_disabled))
1175                 return NULL;
1176
1177         /* If we are tracing schedule, we don't want to recurse */
1178         resched = need_resched();
1179         preempt_disable_notrace();
1180
1181         cpu = raw_smp_processor_id();
1182
1183         if (!cpu_isset(cpu, buffer->cpumask))
1184                 goto out;
1185
1186         cpu_buffer = buffer->buffers[cpu];
1187
1188         if (atomic_read(&cpu_buffer->record_disabled))
1189                 goto out;
1190
1191         length = rb_calculate_event_length(length);
1192         if (length > BUF_PAGE_SIZE)
1193                 goto out;
1194
1195         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1196         if (!event)
1197                 goto out;
1198
1199         /*
1200          * Need to store resched state on this cpu.
1201          * Only the first needs to.
1202          */
1203
1204         if (preempt_count() == 1)
1205                 per_cpu(rb_need_resched, cpu) = resched;
1206
1207         return event;
1208
1209  out:
1210         if (resched)
1211                 preempt_enable_no_resched_notrace();
1212         else
1213                 preempt_enable_notrace();
1214         return NULL;
1215 }
1216
1217 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1218                       struct ring_buffer_event *event)
1219 {
1220         cpu_buffer->entries++;
1221
1222         /* Only process further if we own the commit */
1223         if (!rb_is_commit(cpu_buffer, event))
1224                 return;
1225
1226         cpu_buffer->write_stamp += event->time_delta;
1227
1228         rb_set_commit_to_write(cpu_buffer);
1229 }
1230
1231 /**
1232  * ring_buffer_unlock_commit - commit a reserved event
1233  * @buffer: The buffer to commit to
1234  * @event: The event pointer to commit.
1235  * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1236  *
1237  * This commits the data to the ring buffer, and releases any locks held.
1238  *
1239  * Must be paired with ring_buffer_lock_reserve.
1240  */
1241 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1242                               struct ring_buffer_event *event,
1243                               unsigned long flags)
1244 {
1245         struct ring_buffer_per_cpu *cpu_buffer;
1246         int cpu = raw_smp_processor_id();
1247
1248         cpu_buffer = buffer->buffers[cpu];
1249
1250         rb_commit(cpu_buffer, event);
1251
1252         /*
1253          * Only the last preempt count needs to restore preemption.
1254          */
1255         if (preempt_count() == 1) {
1256                 if (per_cpu(rb_need_resched, cpu))
1257                         preempt_enable_no_resched_notrace();
1258                 else
1259                         preempt_enable_notrace();
1260         } else
1261                 preempt_enable_no_resched_notrace();
1262
1263         return 0;
1264 }
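/*
 * Typical reserve/commit usage (an illustrative sketch; "my_entry" and its
 * fields are made-up names, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */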
1265
1266 /**
1267  * ring_buffer_write - write data to the buffer without reserving
1268  * @buffer: The ring buffer to write to.
1269  * @length: The length of the data being written (excluding the event header)
1270  * @data: The data to write to the buffer.
1271  *
1272  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1273  * one function. If you already have the data to write to the buffer, it
1274  * may be easier to simply call this function.
1275  *
1276  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1277  * and not the length of the event which would hold the header.
1278  */
1279 int ring_buffer_write(struct ring_buffer *buffer,
1280                         unsigned long length,
1281                         void *data)
1282 {
1283         struct ring_buffer_per_cpu *cpu_buffer;
1284         struct ring_buffer_event *event;
1285         unsigned long event_length;
1286         void *body;
1287         int ret = -EBUSY;
1288         int cpu, resched;
1289
1290         if (ring_buffers_off)
1291                 return -EBUSY;
1292
1293         if (atomic_read(&buffer->record_disabled))
1294                 return -EBUSY;
1295
1296         resched = need_resched();
1297         preempt_disable_notrace();
1298
1299         cpu = raw_smp_processor_id();
1300
1301         if (!cpu_isset(cpu, buffer->cpumask))
1302                 goto out;
1303
1304         cpu_buffer = buffer->buffers[cpu];
1305
1306         if (atomic_read(&cpu_buffer->record_disabled))
1307                 goto out;
1308
1309         event_length = rb_calculate_event_length(length);
1310         event = rb_reserve_next_event(cpu_buffer,
1311                                       RINGBUF_TYPE_DATA, event_length);
1312         if (!event)
1313                 goto out;
1314
1315         body = rb_event_data(event);
1316
1317         memcpy(body, data, length);
1318
1319         rb_commit(cpu_buffer, event);
1320
1321         ret = 0;
1322  out:
1323         if (resched)
1324                 preempt_enable_no_resched_notrace();
1325         else
1326                 preempt_enable_notrace();
1327
1328         return ret;
1329 }
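/*
 * When the data is already assembled, the single call above replaces the
 * reserve/commit pair, e.g. (illustrative; "my_data" is a made-up name):
 *
 *	ring_buffer_write(buffer, sizeof(my_data), &my_data);
 */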
1330
1331 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1332 {
1333         struct buffer_page *reader = cpu_buffer->reader_page;
1334         struct buffer_page *head = cpu_buffer->head_page;
1335         struct buffer_page *commit = cpu_buffer->commit_page;
1336
1337         return reader->read == rb_page_commit(reader) &&
1338                 (commit == reader ||
1339                  (commit == head &&
1340                   head->read == rb_page_commit(commit)));
1341 }
1342
1343 /**
1344  * ring_buffer_record_disable - stop all writes into the buffer
1345  * @buffer: The ring buffer to stop writes to.
1346  *
1347  * This prevents all writes to the buffer. Any attempt to write
1348  * to the buffer after this will fail and return NULL.
1349  *
1350  * The caller should call synchronize_sched() after this.
1351  */
1352 void ring_buffer_record_disable(struct ring_buffer *buffer)
1353 {
1354         atomic_inc(&buffer->record_disabled);
1355 }
1356
1357 /**
1358  * ring_buffer_record_enable - enable writes to the buffer
1359  * @buffer: The ring buffer to enable writes
1360  *
1361  * Note, multiple disables will need the same number of enables
1362  * to truly enable the writing (much like preempt_disable).
1363  */
1364 void ring_buffer_record_enable(struct ring_buffer *buffer)
1365 {
1366         atomic_dec(&buffer->record_disabled);
1367 }
1368
1369 /**
1370  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1371  * @buffer: The ring buffer to stop writes to.
1372  * @cpu: The CPU buffer to stop
1373  *
1374  * This prevents all writes to the buffer. Any attempt to write
1375  * to the buffer after this will fail and return NULL.
1376  *
1377  * The caller should call synchronize_sched() after this.
1378  */
1379 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1380 {
1381         struct ring_buffer_per_cpu *cpu_buffer;
1382
1383         if (!cpu_isset(cpu, buffer->cpumask))
1384                 return;
1385
1386         cpu_buffer = buffer->buffers[cpu];
1387         atomic_inc(&cpu_buffer->record_disabled);
1388 }
1389
1390 /**
1391  * ring_buffer_record_enable_cpu - enable writes to the buffer
1392  * @buffer: The ring buffer to enable writes
1393  * @cpu: The CPU to enable.
1394  *
1395  * Note, multiple disables will need the same number of enables
1396  * to truly enable the writing (much like preempt_disable).
1397  */
1398 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1399 {
1400         struct ring_buffer_per_cpu *cpu_buffer;
1401
1402         if (!cpu_isset(cpu, buffer->cpumask))
1403                 return;
1404
1405         cpu_buffer = buffer->buffers[cpu];
1406         atomic_dec(&cpu_buffer->record_disabled);
1407 }
1408
1409 /**
1410  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1411  * @buffer: The ring buffer
1412  * @cpu: The per CPU buffer to get the entries from.
1413  */
1414 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1415 {
1416         struct ring_buffer_per_cpu *cpu_buffer;
1417
1418         if (!cpu_isset(cpu, buffer->cpumask))
1419                 return 0;
1420
1421         cpu_buffer = buffer->buffers[cpu];
1422         return cpu_buffer->entries;
1423 }
1424
1425 /**
1426  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1427  * @buffer: The ring buffer
1428  * @cpu: The per CPU buffer to get the number of overruns from
1429  */
1430 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1431 {
1432         struct ring_buffer_per_cpu *cpu_buffer;
1433
1434         if (!cpu_isset(cpu, buffer->cpumask))
1435                 return 0;
1436
1437         cpu_buffer = buffer->buffers[cpu];
1438         return cpu_buffer->overrun;
1439 }
1440
1441 /**
1442  * ring_buffer_entries - get the number of entries in a buffer
1443  * @buffer: The ring buffer
1444  *
1445  * Returns the total number of entries in the ring buffer
1446  * (all CPU entries)
1447  */
1448 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1449 {
1450         struct ring_buffer_per_cpu *cpu_buffer;
1451         unsigned long entries = 0;
1452         int cpu;
1453
1454         /* if you care about this being correct, lock the buffer */
1455         for_each_buffer_cpu(buffer, cpu) {
1456                 cpu_buffer = buffer->buffers[cpu];
1457                 entries += cpu_buffer->entries;
1458         }
1459
1460         return entries;
1461 }
1462
1463 /**
1464  * ring_buffer_overruns - get the number of overruns in the buffer
1465  * @buffer: The ring buffer
1466  *
1467  * Returns the total number of overruns in the ring buffer
1468  * (all CPU entries)
1469  */
1470 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1471 {
1472         struct ring_buffer_per_cpu *cpu_buffer;
1473         unsigned long overruns = 0;
1474         int cpu;
1475
1476         /* if you care about this being correct, lock the buffer */
1477         for_each_buffer_cpu(buffer, cpu) {
1478                 cpu_buffer = buffer->buffers[cpu];
1479                 overruns += cpu_buffer->overrun;
1480         }
1481
1482         return overruns;
1483 }
1484
1485 /**
1486  * ring_buffer_iter_reset - reset an iterator
1487  * @iter: The iterator to reset
1488  *
1489  * Resets the iterator, so that it will start from the beginning
1490  * again.
1491  */
1492 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1493 {
1494         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1495
1496         /* Iterator usage is expected to have record disabled */
1497         if (list_empty(&cpu_buffer->reader_page->list)) {
1498                 iter->head_page = cpu_buffer->head_page;
1499                 iter->head = cpu_buffer->head_page->read;
1500         } else {
1501                 iter->head_page = cpu_buffer->reader_page;
1502                 iter->head = cpu_buffer->reader_page->read;
1503         }
1504         if (iter->head)
1505                 iter->read_stamp = cpu_buffer->read_stamp;
1506         else
1507                 iter->read_stamp = iter->head_page->time_stamp;
1508 }
1509
1510 /**
1511  * ring_buffer_iter_empty - check if an iterator has no more to read
1512  * @iter: The iterator to check
1513  */
1514 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1515 {
1516         struct ring_buffer_per_cpu *cpu_buffer;
1517
1518         cpu_buffer = iter->cpu_buffer;
1519
1520         return iter->head_page == cpu_buffer->commit_page &&
1521                 iter->head == rb_commit_index(cpu_buffer);
1522 }
1523
1524 static void
1525 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1526                      struct ring_buffer_event *event)
1527 {
1528         u64 delta;
1529
1530         switch (event->type) {
1531         case RINGBUF_TYPE_PADDING:
1532                 return;
1533
1534         case RINGBUF_TYPE_TIME_EXTEND:
1535                 delta = event->array[0];
1536                 delta <<= TS_SHIFT;
1537                 delta += event->time_delta;
1538                 cpu_buffer->read_stamp += delta;
1539                 return;
1540
1541         case RINGBUF_TYPE_TIME_STAMP:
1542                 /* FIXME: not implemented */
1543                 return;
1544
1545         case RINGBUF_TYPE_DATA:
1546                 cpu_buffer->read_stamp += event->time_delta;
1547                 return;
1548
1549         default:
1550                 BUG();
1551         }
1552         return;
1553 }
1554
1555 static void
1556 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1557                           struct ring_buffer_event *event)
1558 {
1559         u64 delta;
1560
1561         switch (event->type) {
1562         case RINGBUF_TYPE_PADDING:
1563                 return;
1564
1565         case RINGBUF_TYPE_TIME_EXTEND:
1566                 delta = event->array[0];
1567                 delta <<= TS_SHIFT;
1568                 delta += event->time_delta;
1569                 iter->read_stamp += delta;
1570                 return;
1571
1572         case RINGBUF_TYPE_TIME_STAMP:
1573                 /* FIXME: not implemented */
1574                 return;
1575
1576         case RINGBUF_TYPE_DATA:
1577                 iter->read_stamp += event->time_delta;
1578                 return;
1579
1580         default:
1581                 BUG();
1582         }
1583         return;
1584 }
1585
1586 static struct buffer_page *
1587 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1588 {
1589         struct buffer_page *reader = NULL;
1590         unsigned long flags;
1591         int nr_loops = 0;
1592
1593         spin_lock_irqsave(&cpu_buffer->lock, flags);
1594
1595  again:
1596         /*
1597          * This should normally only loop twice. But because the
1598          * start of the reader inserts an empty page, it causes
1599          * a case where we will loop three times. There should be no
1600          * reason to loop four times (that I know of).
1601          */
1602         if (unlikely(++nr_loops > 3)) {
1603                 RB_WARN_ON(cpu_buffer, 1);
1604                 reader = NULL;
1605                 goto out;
1606         }
1607
1608         reader = cpu_buffer->reader_page;
1609
1610         /* If there's more to read, return this page */
1611         if (cpu_buffer->reader_page->read < rb_page_size(reader))
1612                 goto out;
1613
1614         /* Never should we have an index greater than the size */
1615         RB_WARN_ON(cpu_buffer,
1616                    cpu_buffer->reader_page->read > rb_page_size(reader));
1617
1618         /* check if we caught up to the tail */
1619         reader = NULL;
1620         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1621                 goto out;
1622
1623         /*
1624          * Splice the empty reader page into the list around the head.
1625          * Reset the reader page to size zero.
1626          */
1627
1628         reader = cpu_buffer->head_page;
1629         cpu_buffer->reader_page->list.next = reader->list.next;
1630         cpu_buffer->reader_page->list.prev = reader->list.prev;
1631
1632         local_set(&cpu_buffer->reader_page->write, 0);
1633         local_set(&cpu_buffer->reader_page->commit, 0);
1634
1635         /* Make the reader page now replace the head */
1636         reader->list.prev->next = &cpu_buffer->reader_page->list;
1637         reader->list.next->prev = &cpu_buffer->reader_page->list;
1638
1639         /*
1640          * If the tail is on the reader, then we must set the head
1641          * to the inserted page, otherwise we set it one before.
1642          */
1643         cpu_buffer->head_page = cpu_buffer->reader_page;
1644
1645         if (cpu_buffer->commit_page != reader)
1646                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1647
1648         /* Finally update the reader page to the new head */
1649         cpu_buffer->reader_page = reader;
1650         rb_reset_reader_page(cpu_buffer);
1651
1652         goto again;
1653
1654  out:
1655         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1656
1657         return reader;
1658 }
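
/*
 * Illustration of the reader-page swap done above (list view;
 * H = old head page, R = old reader page):
 *
 *   before:  ... <-> prev <-> H <-> next <-> ...      R (detached)
 *   after:   ... <-> prev <-> R <-> next <-> ...      H (detached)
 *
 * The empty reader page is spliced into the ring in place of the old
 * head page, and the old head page (holding the oldest unread data)
 * becomes the new reader page handed back to the caller.
 */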
1659
1660 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1661 {
1662         struct ring_buffer_event *event;
1663         struct buffer_page *reader;
1664         unsigned length;
1665
1666         reader = rb_get_reader_page(cpu_buffer);
1667
1668         /* This function should not be called when buffer is empty */
1669         BUG_ON(!reader);
1670
1671         event = rb_reader_event(cpu_buffer);
1672
1673         if (event->type == RINGBUF_TYPE_DATA)
1674                 cpu_buffer->entries--;
1675
1676         rb_update_read_stamp(cpu_buffer, event);
1677
1678         length = rb_event_length(event);
1679         cpu_buffer->reader_page->read += length;
1680 }
1681
1682 static void rb_advance_iter(struct ring_buffer_iter *iter)
1683 {
1684         struct ring_buffer *buffer;
1685         struct ring_buffer_per_cpu *cpu_buffer;
1686         struct ring_buffer_event *event;
1687         unsigned length;
1688
1689         cpu_buffer = iter->cpu_buffer;
1690         buffer = cpu_buffer->buffer;
1691
1692         /*
1693          * Check if we are at the end of the buffer.
1694          */
1695         if (iter->head >= rb_page_size(iter->head_page)) {
1696                 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1697                 rb_inc_iter(iter);
1698                 return;
1699         }
1700
1701         event = rb_iter_head_event(iter);
1702
1703         length = rb_event_length(event);
1704
1705         /*
1706          * This should not be called to advance the head if we are
1707          * at the tail of the buffer.
1708          */
1709         BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1710                (iter->head + length > rb_commit_index(cpu_buffer)));
1711
1712         rb_update_iter_read_stamp(iter, event);
1713
1714         iter->head += length;
1715
1716         /* check for end of page padding */
1717         if ((iter->head >= rb_page_size(iter->head_page)) &&
1718             (iter->head_page != cpu_buffer->commit_page))
1719                 rb_advance_iter(iter);
1720 }
1721
1722 /**
1723  * ring_buffer_peek - peek at the next event to be read
1724  * @buffer: The ring buffer to read
1725  * @cpu: The cpu to peek at
1726  * @ts: The timestamp counter of this event.
1727  *
1728  * This will return the event that will be read next, but does
1729  * not consume the data.
1730  */
1731 struct ring_buffer_event *
1732 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1733 {
1734         struct ring_buffer_per_cpu *cpu_buffer;
1735         struct ring_buffer_event *event;
1736         struct buffer_page *reader;
1737         int nr_loops = 0;
1738
1739         if (!cpu_isset(cpu, buffer->cpumask))
1740                 return NULL;
1741
1742         cpu_buffer = buffer->buffers[cpu];
1743
1744  again:
1745         /*
1746          * We repeat when a timestamp is encountered. It is possible
1747          * to get multiple timestamps from an interrupt entering just
1748          * as one timestamp is about to be written. The maximum number
1749          * of times this can happen is the number of nested interrupts we
1750          * can have.  Nesting interrupts 10 deep is clearly
1751          * an anomaly.
1752          */
1753         if (unlikely(++nr_loops > 10)) {
1754                 RB_WARN_ON(cpu_buffer, 1);
1755                 return NULL;
1756         }
1757
1758         reader = rb_get_reader_page(cpu_buffer);
1759         if (!reader)
1760                 return NULL;
1761
1762         event = rb_reader_event(cpu_buffer);
1763
1764         switch (event->type) {
1765         case RINGBUF_TYPE_PADDING:
1766                 RB_WARN_ON(cpu_buffer, 1);
1767                 rb_advance_reader(cpu_buffer);
1768                 return NULL;
1769
1770         case RINGBUF_TYPE_TIME_EXTEND:
1771                 /* Internal data, OK to advance */
1772                 rb_advance_reader(cpu_buffer);
1773                 goto again;
1774
1775         case RINGBUF_TYPE_TIME_STAMP:
1776                 /* FIXME: not implemented */
1777                 rb_advance_reader(cpu_buffer);
1778                 goto again;
1779
1780         case RINGBUF_TYPE_DATA:
1781                 if (ts) {
1782                         *ts = cpu_buffer->read_stamp + event->time_delta;
1783                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1784                 }
1785                 return event;
1786
1787         default:
1788                 BUG();
1789         }
1790
1791         return NULL;
1792 }
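
/*
 * A minimal usage sketch (illustrative only, not part of the ring buffer
 * API): peek at the next event on one CPU without consuming it.  The
 * "buffer" and "my_cpu" arguments are assumed to come from the caller;
 * repeated calls return the same event until something consumes it.
 */
static void example_peek_next_event(struct ring_buffer *buffer, int my_cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        event = ring_buffer_peek(buffer, my_cpu, &ts);
        if (!event)
                return;         /* this CPU buffer is empty */

        /* The event stays in the buffer; only inspect it here. */
        pr_debug("next event: len=%u ts=%llu\n",
                 ring_buffer_event_length(event), (unsigned long long)ts);
}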
1793
1794 /**
1795  * ring_buffer_iter_peek - peek at the next event to be read
1796  * @iter: The ring buffer iterator
1797  * @ts: The timestamp counter of this event.
1798  *
1799  * This will return the event that will be read next, but does
1800  * not increment the iterator.
1801  */
1802 struct ring_buffer_event *
1803 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1804 {
1805         struct ring_buffer *buffer;
1806         struct ring_buffer_per_cpu *cpu_buffer;
1807         struct ring_buffer_event *event;
1808         int nr_loops = 0;
1809
1810         if (ring_buffer_iter_empty(iter))
1811                 return NULL;
1812
1813         cpu_buffer = iter->cpu_buffer;
1814         buffer = cpu_buffer->buffer;
1815
1816  again:
1817         /*
1818          * We repeat when a timestamp is encountered. It is possible
1819          * to get multiple timestamps from an interrupt entering just
1820          * as one timestamp is about to be written. The maximum number
1821          * of times this can happen is the number of nested interrupts we
1822          * can have. Nesting interrupts 10 deep is clearly
1823          * an anomaly.
1824          */
1825         if (unlikely(++nr_loops > 10)) {
1826                 RB_WARN_ON(cpu_buffer, 1);
1827                 return NULL;
1828         }
1829
1830         if (rb_per_cpu_empty(cpu_buffer))
1831                 return NULL;
1832
1833         event = rb_iter_head_event(iter);
1834
1835         switch (event->type) {
1836         case RINGBUF_TYPE_PADDING:
1837                 rb_inc_iter(iter);
1838                 goto again;
1839
1840         case RINGBUF_TYPE_TIME_EXTEND:
1841                 /* Internal data, OK to advance */
1842                 rb_advance_iter(iter);
1843                 goto again;
1844
1845         case RINGBUF_TYPE_TIME_STAMP:
1846                 /* FIXME: not implemented */
1847                 rb_advance_iter(iter);
1848                 goto again;
1849
1850         case RINGBUF_TYPE_DATA:
1851                 if (ts) {
1852                         *ts = iter->read_stamp + event->time_delta;
1853                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1854                 }
1855                 return event;
1856
1857         default:
1858                 BUG();
1859         }
1860
1861         return NULL;
1862 }
1863
1864 /**
1865  * ring_buffer_consume - return an event and consume it
1866  * @buffer: The ring buffer to get the next event from
1867  *
1868  * Returns the next event in the ring buffer and consumes it. This
1869  * means that sequential reads will keep returning different events and
1870  * will eventually empty the ring buffer if the producer is slower
1871  * than the reader.
1871  */
1872 struct ring_buffer_event *
1873 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1874 {
1875         struct ring_buffer_per_cpu *cpu_buffer;
1876         struct ring_buffer_event *event;
1877
1878         if (!cpu_isset(cpu, buffer->cpumask))
1879                 return NULL;
1880
1881         event = ring_buffer_peek(buffer, cpu, ts);
1882         if (!event)
1883                 return NULL;
1884
1885         cpu_buffer = buffer->buffers[cpu];
1886         rb_advance_reader(cpu_buffer);
1887
1888         return event;
1889 }
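
/*
 * A minimal usage sketch (illustrative only, not part of the ring buffer
 * API): drain one CPU buffer with consuming reads.  Each successful
 * ring_buffer_consume() removes the returned event, so the loop ends once
 * the CPU buffer is empty (assuming the producer does not outrun us).
 * "buffer" and "my_cpu" are assumed to come from the caller.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int my_cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, my_cpu, &ts))) {
                void *data = ring_buffer_event_data(event);

                /* "data" points into a buffer page; copy it out if it
                 * must outlive the next consume or a buffer reset. */
                pr_debug("consumed event: len=%u ts=%llu data=%p\n",
                         ring_buffer_event_length(event),
                         (unsigned long long)ts, data);
        }
}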
1890
1891 /**
1892  * ring_buffer_read_start - start a non consuming read of the buffer
1893  * @buffer: The ring buffer to read from
1894  * @cpu: The cpu buffer to iterate over
1895  *
1896  * This starts up an iteration through the buffer. It also disables
1897  * the recording to the buffer until the reading is finished.
1898  * This prevents the reading from being corrupted. This is not
1899  * a consuming read, so a producer is not expected.
1900  *
1901  * Must be paired with ring_buffer_read_finish().
1902  */
1903 struct ring_buffer_iter *
1904 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1905 {
1906         struct ring_buffer_per_cpu *cpu_buffer;
1907         struct ring_buffer_iter *iter;
1908         unsigned long flags;
1909
1910         if (!cpu_isset(cpu, buffer->cpumask))
1911                 return NULL;
1912
1913         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1914         if (!iter)
1915                 return NULL;
1916
1917         cpu_buffer = buffer->buffers[cpu];
1918
1919         iter->cpu_buffer = cpu_buffer;
1920
1921         atomic_inc(&cpu_buffer->record_disabled);
1922         synchronize_sched();
1923
1924         spin_lock_irqsave(&cpu_buffer->lock, flags);
1925         ring_buffer_iter_reset(iter);
1926         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1927
1928         return iter;
1929 }
1930
1931 /**
1932  * ring_buffer_read_finish - finish reading the iterator of the buffer
1933  * @iter: The iterator retrieved by ring_buffer_read_start
1934  *
1935  * This re-enables the recording to the buffer, and frees the
1936  * iterator.
1937  */
1938 void
1939 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1940 {
1941         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1942
1943         atomic_dec(&cpu_buffer->record_disabled);
1944         kfree(iter);
1945 }
1946
1947 /**
1948  * ring_buffer_read - read the next item in the ring buffer by the iterator
1949  * @iter: The ring buffer iterator
1950  * @ts: The time stamp of the event read.
1951  *
1952  * This reads the next event in the ring buffer and increments the iterator.
1953  */
1954 struct ring_buffer_event *
1955 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1956 {
1957         struct ring_buffer_event *event;
1958
1959         event = ring_buffer_iter_peek(iter, ts);
1960         if (!event)
1961                 return NULL;
1962
1963         rb_advance_iter(iter);
1964
1965         return event;
1966 }
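
/*
 * A minimal usage sketch (illustrative only, not part of the ring buffer
 * API): a non-consuming walk over one CPU buffer with the iterator API.
 * ring_buffer_read_start() disables recording on that CPU buffer until
 * ring_buffer_read_finish() is called, so the walk should be kept short.
 * "buffer" and "my_cpu" are assumed to come from the caller.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int my_cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, my_cpu);
        if (!iter)
                return;

        /* ring_buffer_read() returns NULL once the iterator runs out. */
        while ((event = ring_buffer_read(iter, &ts)))
                pr_debug("saw event: len=%u ts=%llu\n",
                         ring_buffer_event_length(event),
                         (unsigned long long)ts);

        ring_buffer_read_finish(iter);
}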
1967
1968 /**
1969  * ring_buffer_size - return the size of the ring buffer (in bytes)
1970  * @buffer: The ring buffer.
1971  */
1972 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1973 {
1974         return BUF_PAGE_SIZE * buffer->pages;
1975 }
1976
1977 static void
1978 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1979 {
1980         cpu_buffer->head_page
1981                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1982         local_set(&cpu_buffer->head_page->write, 0);
1983         local_set(&cpu_buffer->head_page->commit, 0);
1984
1985         cpu_buffer->head_page->read = 0;
1986
1987         cpu_buffer->tail_page = cpu_buffer->head_page;
1988         cpu_buffer->commit_page = cpu_buffer->head_page;
1989
1990         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1991         local_set(&cpu_buffer->reader_page->write, 0);
1992         local_set(&cpu_buffer->reader_page->commit, 0);
1993         cpu_buffer->reader_page->read = 0;
1994
1995         cpu_buffer->overrun = 0;
1996         cpu_buffer->entries = 0;
1997 }
1998
1999 /**
2000  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2001  * @buffer: The ring buffer to reset a per cpu buffer of
2002  * @cpu: The CPU buffer to be reset
2003  */
2004 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2005 {
2006         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2007         unsigned long flags;
2008
2009         if (!cpu_isset(cpu, buffer->cpumask))
2010                 return;
2011
2012         spin_lock_irqsave(&cpu_buffer->lock, flags);
2013
2014         rb_reset_cpu(cpu_buffer);
2015
2016         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2017 }
2018
2019 /**
2020  * ring_buffer_reset - reset a ring buffer
2021  * @buffer: The ring buffer to reset all cpu buffers
2022  */
2023 void ring_buffer_reset(struct ring_buffer *buffer)
2024 {
2025         int cpu;
2026
2027         for_each_buffer_cpu(buffer, cpu)
2028                 ring_buffer_reset_cpu(buffer, cpu);
2029 }
2030
2031 /**
2032  * ring_buffer_empty - is the ring buffer empty?
2033  * @buffer: The ring buffer to test
2034  */
2035 int ring_buffer_empty(struct ring_buffer *buffer)
2036 {
2037         struct ring_buffer_per_cpu *cpu_buffer;
2038         int cpu;
2039
2040         /* yes this is racy, but if you don't like the race, lock the buffer */
2041         for_each_buffer_cpu(buffer, cpu) {
2042                 cpu_buffer = buffer->buffers[cpu];
2043                 if (!rb_per_cpu_empty(cpu_buffer))
2044                         return 0;
2045         }
2046         return 1;
2047 }
2048
2049 /**
2050  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2051  * @buffer: The ring buffer
2052  * @cpu: The CPU buffer to test
2053  */
2054 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2055 {
2056         struct ring_buffer_per_cpu *cpu_buffer;
2057
2058         if (!cpu_isset(cpu, buffer->cpumask))
2059                 return 1;
2060
2061         cpu_buffer = buffer->buffers[cpu];
2062         return rb_per_cpu_empty(cpu_buffer);
2063 }
2064
2065 /**
2066  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2067  * @buffer_a: One buffer to swap with
2068  * @buffer_b: The other buffer to swap with
2069  *
2070  * This function is useful for tracers that want to take a "snapshot"
2071  * of a CPU buffer and have another backup buffer lying around.
2072  * It is expected that the tracer handles the cpu buffer not being
2073  * used at the moment.
2074  */
2075 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2076                          struct ring_buffer *buffer_b, int cpu)
2077 {
2078         struct ring_buffer_per_cpu *cpu_buffer_a;
2079         struct ring_buffer_per_cpu *cpu_buffer_b;
2080
2081         if (!cpu_isset(cpu, buffer_a->cpumask) ||
2082             !cpu_isset(cpu, buffer_b->cpumask))
2083                 return -EINVAL;
2084
2085         /* At least make sure the two buffers are somewhat the same */
2086         if (buffer_a->size != buffer_b->size ||
2087             buffer_a->pages != buffer_b->pages)
2088                 return -EINVAL;
2089
2090         cpu_buffer_a = buffer_a->buffers[cpu];
2091         cpu_buffer_b = buffer_b->buffers[cpu];
2092
2093         /*
2094          * We can't do a synchronize_sched here because this
2095          * function can be called in atomic context.
2096          * Normally this will be called from the same CPU as cpu.
2097          * If not it's up to the caller to protect this.
2098          */
2099         atomic_inc(&cpu_buffer_a->record_disabled);
2100         atomic_inc(&cpu_buffer_b->record_disabled);
2101
2102         buffer_a->buffers[cpu] = cpu_buffer_b;
2103         buffer_b->buffers[cpu] = cpu_buffer_a;
2104
2105         cpu_buffer_b->buffer = buffer_a;
2106         cpu_buffer_a->buffer = buffer_b;
2107
2108         atomic_dec(&cpu_buffer_a->record_disabled);
2109         atomic_dec(&cpu_buffer_b->record_disabled);
2110
2111         return 0;
2112 }
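
/*
 * A minimal usage sketch (illustrative only, not part of the ring buffer
 * API): snapshot one CPU buffer by swapping it with a spare buffer of
 * identical geometry, then read the swapped-out data at leisure.
 * "main_buffer" and "spare_buffer" are assumed to have been allocated by
 * the caller with the same size; the caller must also ensure the
 * snapshotted CPU buffer is not in use while it is read (see the comment
 * in ring_buffer_swap_cpu() above).
 */
static int example_snapshot_cpu(struct ring_buffer *main_buffer,
                                struct ring_buffer *spare_buffer, int my_cpu)
{
        struct ring_buffer_event *event;
        u64 ts;
        int ret;

        ret = ring_buffer_swap_cpu(main_buffer, spare_buffer, my_cpu);
        if (ret)
                return ret;

        /* The old data for my_cpu now lives in spare_buffer. */
        while ((event = ring_buffer_consume(spare_buffer, my_cpu, &ts)))
                pr_debug("snapshot event: len=%u ts=%llu\n",
                         ring_buffer_event_length(event),
                         (unsigned long long)ts);

        return 0;
}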
2113
2114 static ssize_t
2115 rb_simple_read(struct file *filp, char __user *ubuf,
2116                size_t cnt, loff_t *ppos)
2117 {
2118         int *p = filp->private_data;
2119         char buf[64];
2120         int r;
2121
2122         /* !ring_buffers_off == tracing_on */
2123         r = sprintf(buf, "%d\n", !*p);
2124
2125         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2126 }
2127
2128 static ssize_t
2129 rb_simple_write(struct file *filp, const char __user *ubuf,
2130                 size_t cnt, loff_t *ppos)
2131 {
2132         int *p = filp->private_data;
2133         char buf[64];
2134         long val;
2135         int ret;
2136
2137         if (cnt >= sizeof(buf))
2138                 return -EINVAL;
2139
2140         if (copy_from_user(&buf, ubuf, cnt))
2141                 return -EFAULT;
2142
2143         buf[cnt] = 0;
2144
2145         ret = strict_strtoul(buf, 10, &val);
2146         if (ret < 0)
2147                 return ret;
2148
2149         /* !ring_buffers_off == tracing_on */
2150         *p = !val;
2151
2152         (*ppos)++;
2153
2154         return cnt;
2155 }
2156
2157 static struct file_operations rb_simple_fops = {
2158         .open           = tracing_open_generic,
2159         .read           = rb_simple_read,
2160         .write          = rb_simple_write,
2161 };
2162
2163
2164 static __init int rb_init_debugfs(void)
2165 {
2166         struct dentry *d_tracer;
2167         struct dentry *entry;
2168
2169         d_tracer = tracing_init_dentry();
2170
2171         entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2172                                     &ring_buffers_off, &rb_simple_fops);
2173         if (!entry)
2174                 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2175
2176         return 0;
2177 }
2178
2179 fs_initcall(rb_init_debugfs);
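
/*
 * Userspace view of the knob created above.  The exact path depends on
 * where debugfs is mounted (commonly /sys/kernel/debug or /debug):
 *
 *      # echo 0 > <debugfs>/tracing/tracing_on    (stop all recording)
 *      # echo 1 > <debugfs>/tracing/tracing_on    (resume recording)
 *      # cat <debugfs>/tracing/tracing_on         (1 = recording enabled)
 *
 * Writes store !val into ring_buffers_off, so the file reads back "1"
 * while recording is enabled.
 */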