X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Foprofile%2Fcpu_buffer.c;h=efcbf4b4579f275bccea1b00fda95891925e4be6;hb=0e170c72c0c55bd78213a0f5053bd9a1dde403b7;hp=fc4bc9b94c748d2e1736913f8d4978bc07cfcb49;hpb=273577165cd206d2d6689ee4b18aa13de1ec4bde;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b..efcbf4b 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -27,9 +27,9 @@
 #include "buffer_sync.h"
 #include "oprof.h"
 
-struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct oprofile_cpu_buffer, cpu_buffer);
 
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
 	int i;
 
 	for_each_online_cpu(i)
-		vfree(cpu_buffer[i].buffer);
+		vfree(per_cpu(cpu_buffer, i).buffer);
 }
 
 int alloc_cpu_buffers(void)
@@ -49,7 +49,7 @@ int alloc_cpu_buffers(void)
 	unsigned long buffer_size = fs_cpu_buffer_size;
 
 	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
 			cpu_to_node(i));
@@ -64,8 +64,10 @@ int alloc_cpu_buffers(void)
 		b->head_pos = 0;
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
+		b->backtrace_aborted = 0;
+		b->sample_invalid_eip = 0;
 		b->cpu = i;
-		INIT_WORK(&b->work, wq_sync_buffer, b);
+		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
 	}
 	return 0;
 
@@ -81,7 +83,7 @@ void start_cpu_work(void)
 	work_enabled = 1;
 
 	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		/*
 		 * Spread the work by 1 jiffy per cpu so they dont all
@@ -98,7 +100,7 @@ void end_cpu_work(void)
 	work_enabled = 0;
 
 	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		cancel_delayed_work(&b->work);
 	}
@@ -175,6 +177,11 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
 
 	cpu_buf->sample_received++;
 
+	if (pc == ESCAPE_CODE) {
+		cpu_buf->sample_invalid_eip++;
+		return 0;
+	}
+
 	if (nr_available_slots(cpu_buf) < 3) {
 		cpu_buf->sample_lost_overflow++;
 		return 0;
@@ -220,7 +227,7 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
 void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 				unsigned long event, int is_kernel)
 {
-	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 
 	if (!backtrace_depth) {
 		log_sample(cpu_buf, pc, is_kernel, event);
@@ -247,13 +254,13 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
-	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	log_sample(cpu_buf, pc, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
 {
-	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 
 	if (!cpu_buf->tracing)
 		return;
@@ -282,9 +289,10 @@ void oprofile_add_trace(unsigned long pc)
  * By using schedule_delayed_work_on and then schedule_delayed_work
  * we guarantee this will stay on the correct cpu
  */
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b = data;
+	struct oprofile_cpu_buffer * b =
+		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk("WQ on CPU%d, prefer CPU%d\n",
 			smp_processor_id(), b->cpu);
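
The core of this patch replaces the NR_CPUS-sized array with a per-CPU variable. Below is a minimal, self-contained sketch of that pattern against the 2.6-era per-CPU API; the names example_stats, example_reset, and example_hit are hypothetical, not part of the oprofile driver.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU statistics record (illustrative only). */
struct example_stats {
	unsigned long hits;
};

/*
 * One copy per CPU, aligned so that two CPUs' copies do not share a
 * cache line; this is what SHARED_ALIGNED buys over plain
 * DEFINE_PER_CPU, and why the patch can drop __cacheline_aligned.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct example_stats, example_stats);

/* Reach a specific CPU's copy by index, as free_cpu_buffers() does. */
static void example_reset(int cpu)
{
	per_cpu(example_stats, cpu).hits = 0;
}

/*
 * Reach the local CPU's copy, as oprofile_add_pc() now does; the
 * caller must not be preemptible, or it may touch the wrong copy.
 */
static void example_hit(void)
{
	__get_cpu_var(example_stats).hits++;
}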
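The other half of the patch is the post-2.6.20 workqueue conversion: callbacks no longer receive a void * cookie but a struct work_struct *, and recover their context with container_of(). A minimal sketch, assuming a struct delayed_work embedded the way the oprofile cpu buffer embeds its work field (the example_* names are made up):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct example_buffer {
	int cpu;
	struct delayed_work work;	/* embeds a struct work_struct */
};

static void example_sync(struct work_struct *work)
{
	/*
	 * 'work' points at the work_struct inside the delayed_work, so
	 * the member path is work.work; container_of() then walks back
	 * out to the embedding example_buffer, which is exactly what
	 * the patched wq_sync_buffer() does.
	 */
	struct example_buffer *b =
		container_of(work, struct example_buffer, work.work);

	pr_debug("syncing buffer for cpu %d\n", b->cpu);
}

static void example_start(struct example_buffer *b)
{
	INIT_DELAYED_WORK(&b->work, example_sync);
	/* Queue on b->cpu, mirroring how start_cpu_work() pins work. */
	schedule_delayed_work_on(b->cpu, &b->work, HZ / 10);
}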