/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */
10 #ifndef OPROFILE_CPU_BUFFER_H
11 #define OPROFILE_CPU_BUFFER_H
13 #include <linux/types.h>
14 #include <linux/spinlock.h>
15 #include <linux/workqueue.h>
16 #include <linux/cache.h>
17 #include <linux/sched.h>
21 int alloc_cpu_buffers(void);
22 void free_cpu_buffers(void);
24 void start_cpu_work(void);
25 void end_cpu_work(void);
27 /* CPU buffer is composed of such entries (which are
28 * also used for context switch notes)
35 struct oprofile_cpu_buffer {
36 volatile unsigned long head_pos;
37 volatile unsigned long tail_pos;
38 unsigned long buffer_size;
39 struct task_struct *last_task;
42 struct op_sample *buffer;
43 unsigned long sample_received;
44 unsigned long sample_lost_overflow;
45 unsigned long backtrace_aborted;
46 unsigned long sample_invalid_eip;
48 struct delayed_work work;
51 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
53 void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
56 struct op_sample *cpu_buffer_write_entry(struct oprofile_cpu_buffer *cpu_buf)
58 return &cpu_buf->buffer[cpu_buf->head_pos];
62 void cpu_buffer_write_commit(struct oprofile_cpu_buffer *b)
64 unsigned long new_head = b->head_pos + 1;
67 * Ensure anything written to the slot before we increment is
72 if (new_head < b->buffer_size)
73 b->head_pos = new_head;
79 struct op_sample *cpu_buffer_read_entry(struct oprofile_cpu_buffer *cpu_buf)
81 return &cpu_buf->buffer[cpu_buf->tail_pos];
84 /* "acquire" as many cpu buffer slots as we can */
86 unsigned long cpu_buffer_entries(struct oprofile_cpu_buffer *b)
88 unsigned long head = b->head_pos;
89 unsigned long tail = b->tail_pos;
92 * Subtle. This resets the persistent last_task
93 * and in_kernel values used for switching notes.
94 * BUT, there is a small window between reading
95 * head_pos, and this call, that means samples
96 * can appear at the new head position, but not
97 * be prefixed with the notes for switching
98 * kernel mode or a task switch. This small hole
99 * can lead to mis-attribution or samples where
100 * we don't know if it's in the kernel or not,
101 * at the start of an event buffer.
108 return head + (b->buffer_size - tail);
111 /* transient events for the CPU buffer -> event buffer */
112 #define CPU_IS_KERNEL 1
113 #define CPU_TRACE_BEGIN 2
114 #define IBS_FETCH_BEGIN 3
115 #define IBS_OP_BEGIN 4
117 #endif /* OPROFILE_CPU_BUFFER_H */