* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
*
* Each CPU has a local buffer that stores PC value/event
* pairs. We also log context switches when we notice them.
#include "buffer_sync.h"
#include "oprof.h"
-struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
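+/*
+ * One buffer per possible CPU, as a per-CPU variable rather than a
+ * static NR_CPUS array: per_cpu(cpu_buffer, i) reaches a given CPU's
+ * buffer, __get_cpu_var(cpu_buffer) the local one.
+ */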
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
{
int i;
- for_each_online_cpu(i)
- vfree(cpu_buffer[i].buffer);
+ for_each_online_cpu(i) {
+ vfree(per_cpu(cpu_buffer, i).buffer);
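+		/* clear the pointer: a later vfree(NULL) is a harmless no-op */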
+ per_cpu(cpu_buffer, i).buffer = NULL;
+ }
}
int alloc_cpu_buffers(void)
unsigned long buffer_size = fs_cpu_buffer_size;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
cpu_to_node(i));
work_enabled = 1;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they don't all
work_enabled = 0;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
cancel_delayed_work(&b->work);
}
return 1;
}
-static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
+static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
if (nr_available_slots(cpu_buf) < 4) {
cpu_buf->sample_lost_overflow++;
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!backtrace_depth) {
log_sample(cpu_buf, pc, is_kernel, event);
oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
+#ifdef CONFIG_OPROFILE_IBS
+
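+/* cpu_buffer slots reserved for a single IBS sample record */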
+#define MAX_IBS_SAMPLE_SIZE 14
+
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+			unsigned int *const ibs_sample, int ibs_code)
+{
+ int is_kernel = !user_mode(regs);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct task_struct *task;
+
+ cpu_buf->sample_received++;
+
+ if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+ /* we can't backtrace since we lost the source of this event */
+ cpu_buf->sample_lost_overflow++;
+ return;
+ }
+
+ /* notice a switch from user->kernel or vice versa */
+ if (cpu_buf->last_is_kernel != is_kernel) {
+ cpu_buf->last_is_kernel = is_kernel;
+ add_code(cpu_buf, is_kernel);
+ }
+
+ /* notice a task switch */
+ if (!is_kernel) {
+ task = current;
+ if (cpu_buf->last_task != task) {
+ cpu_buf->last_task = task;
+ add_code(cpu_buf, (unsigned long)task);
+ }
+ }
+
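+	/* log the IBS code, then the raw IBS data as eip/event pairs */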
+ add_code(cpu_buf, ibs_code);
+ add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+ add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+ add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
+
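+	/* IBS op events carry three additional data pairs */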
+ if (ibs_code == IBS_OP_BEGIN) {
+ add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+ add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+ add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
+ }
+
+ if (backtrace_depth)
+ oprofile_ops.backtrace(regs, backtrace_depth);
+}
+
+#endif
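+/*
+ * Typical arch-side usage (hypothetical sketch): the IBS interrupt
+ * handler reads the fetch or op MSRs into an unsigned int array and
+ * hands it over, e.g.
+ *
+ *	unsigned int sample[12];
+ *	... read the IBS MSRs into sample[] ...
+ *	oprofile_add_ibs_sample(regs, sample, IBS_OP_BEGIN);
+ */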
+
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
log_sample(cpu_buf, pc, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!cpu_buf->tracing)
return;
struct oprofile_cpu_buffer * b =
container_of(work, struct oprofile_cpu_buffer, work.work);
if (b->cpu != smp_processor_id()) {
- printk("WQ on CPU%d, prefer CPU%d\n",
+ printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
}
sync_buffer(b->cpu);