#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>
struct task_struct;
unsigned long event;
};
/*
 * Transient handle for one sample being written to / read from the
 * per-CPU ring buffer: the reserved ring_buffer event, the sample
 * payload it carries, and the IRQ flags saved across the operation.
 * (NOTE(review): irq_flags presumably pairs with the reserve/commit
 * calls in cpu_buffer.c — confirm against the implementation.)
 */
struct op_entry {
	struct ring_buffer_event *event;
	struct op_sample *sample;
	unsigned long irq_flags;
};

struct oprofile_cpu_buffer {
volatile unsigned long head_pos;
volatile unsigned long tail_pos;
struct task_struct *last_task;
int last_is_kernel;
int tracing;
- struct op_sample *buffer;
unsigned long sample_received;
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
-
-static inline
-struct op_sample *cpu_buffer_write_entry(struct oprofile_cpu_buffer *cpu_buf)
-{
- return &cpu_buf->buffer[cpu_buf->head_pos];
-}
-
-static inline
-void cpu_buffer_write_commit(struct oprofile_cpu_buffer *b)
+/*
+ * Resets the cpu buffer to a sane state.
+ *
+ * reset these to invalid values; the next sample collected will
+ * populate the buffer with proper values to initialize the buffer
+ */
+static inline void op_cpu_buffer_reset(int cpu)
{
- unsigned long new_head = b->head_pos + 1;
-
- /*
- * Ensure anything written to the slot before we increment is
- * visible
- */
- wmb();
+ struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
- if (new_head < b->buffer_size)
- b->head_pos = new_head;
- else
- b->head_pos = 0;
+ cpu_buf->last_is_kernel = -1;
+ cpu_buf->last_task = NULL;
}
/*
 * Accessors for the ring-buffer backed CPU buffer; definitions live
 * outside this header. write_entry reserves space for *entry,
 * write_commit publishes it; read_entry/entries are the consumer side.
 * (NOTE(review): exact return-value contracts not visible here —
 * confirm against the implementation.)
 */
int op_cpu_buffer_write_entry(struct op_entry *entry);
int op_cpu_buffer_write_commit(struct op_entry *entry);
struct op_sample *op_cpu_buffer_read_entry(int cpu);
unsigned long op_cpu_buffer_entries(int cpu);
/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1