perf, x86: Properly account n_added
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 73102df..84bfde6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = __idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
 
        mask = 0xfULL << (idx * 4);
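
Note on the bit arithmetic above: each fixed counter owns a 4-bit enable field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL, so the field for counter idx is selected by 0xfULL << (idx * 4). A standalone sketch of that masking, with an invented control value (the real code read-modify-writes the MSR):

#include <stdint.h>
#include <stdio.h>

/* Each fixed counter gets a 4-bit control field; clearing it stops the counter. */
static uint64_t fixed_ctrl_disable(uint64_t ctrl_val, int idx)
{
    uint64_t mask = 0xfULL << (idx * 4);

    return ctrl_val & ~mask;
}

int main(void)
{
    uint64_t ctrl = 0x333;  /* made-up value: fixed counters 0, 1, 2 enabled */

    printf("before %#llx, after disabling counter 1: %#llx\n",
           (unsigned long long)ctrl,
           (unsigned long long)fixed_ctrl_disable(ctrl, 1));
    return 0;
}
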
@@ -590,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
        ds->bts_index = ds->bts_buffer_base;
 
+       perf_sample_data_init(&data, 0);
 
        data.period     = event->hw.last_period;
-       data.addr       = 0;
-       data.raw        = NULL;
        regs.ip         = 0;
 
        /*
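
perf_sample_data_init(&data, 0) replaces the open-coded resets of data.addr and data.raw above. Going only by the two assignments this hunk deletes, the helper plausibly looks like the sketch below; the struct and field set are assumptions, not the kernel's full perf_sample_data definition:

#include <stddef.h>

/* Assumed shape only; the real struct perf_sample_data has more fields. */
struct sample_data {
    unsigned long long addr;
    void *raw;
    unsigned long long period;
};

/* Centralizes what every call site used to zero by hand. */
static inline void sample_data_init(struct sample_data *data,
                                    unsigned long long addr)
{
    data->addr = addr;  /* was: data.addr = 0; at each caller */
    data->raw  = NULL;  /* was: data.raw  = NULL; at each caller */
}

int main(void)
{
    struct sample_data data;

    sample_data_init(&data, 0);
    data.period = 1000;  /* callers still set period themselves, as above */
    return 0;
}
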
@@ -622,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }
 
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_disable_fixed(hwc, idx);
+               intel_pmu_disable_fixed(hwc);
                return;
        }
 
-       x86_pmu_disable_event(hwc, idx);
+       x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = __idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;
 
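
The recurring change in this hunk: the enable/disable helpers now take the perf_event itself rather than a (hwc, idx) pair, deriving hwc and idx inside the callee, so a caller can no longer hand in an index that disagrees with the event. Reduced to a toy model:

#include <stdio.h>

struct hw_perf_event { int idx; unsigned long config; };
struct perf_event    { struct hw_perf_event hw; };

/* After the refactor: one argument; the index always comes from the event. */
static void pmu_disable_event(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    printf("disabling counter %d\n", hwc->idx);
}

int main(void)
{
    struct perf_event ev = { .hw = { .idx = 2 } };

    pmu_disable_event(&ev);
    return 0;
}
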
@@ -671,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__get_cpu_var(cpu_hw_events).enabled)
                        return;
 
@@ -682,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
        }
 
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_enable_fixed(hwc, idx);
+               intel_pmu_enable_fixed(hwc);
                return;
        }
 
-       __x86_pmu_enable_event(hwc, idx);
+       __x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -695,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  */
 static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-       int ret;
-
-       x86_perf_event_update(event, hwc, idx);
-       ret = x86_perf_event_set_period(event, hwc, idx);
-
-       return ret;
+       x86_perf_event_update(event);
+       return x86_perf_event_set_period(event);
 }
 
 static void intel_pmu_reset(void)
@@ -742,16 +739,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 ack, status;
 
-       data.addr = 0;
-       data.raw = NULL;
+       perf_sample_data_init(&data, 0);
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
-       perf_disable();
+       intel_pmu_disable_all();
        intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
-               perf_enable();
+               intel_pmu_enable_all();
                return 0;
        }
 
@@ -761,8 +757,7 @@ again:
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
-               perf_enable();
-               return 1;
+               goto done;
        }
 
        inc_irq_stat(apic_perf_irqs);
@@ -770,7 +765,6 @@ again:
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
-               clear_bit(bit, (unsigned long *) &status);
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -780,7 +774,7 @@ again:
                data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       intel_pmu_disable_event(&event->hw, bit);
+                       x86_pmu_stop(event);
        }
 
        intel_pmu_ack_status(ack);
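
On overflow throttling the handler now goes through the generic x86_pmu_stop(event) instead of disabling the counter directly. The likely point, sketched here with invented helpers, is that stop() pairs the hardware disable with the software bookkeeping that a bare disable skips:

static unsigned long active_mask;   /* software view of running counters */

static void hw_disable_counter(int idx)
{
    (void)idx;                      /* stands in for the control-MSR write */
}

static void pmu_stop(int idx)
{
    hw_disable_counter(idx);        /* what the old direct disable did */
    active_mask &= ~(1UL << idx);   /* the extra bookkeeping stop() adds */
}

int main(void)
{
    active_mask = 1UL << 3;
    pmu_stop(3);
    return active_mask != 0;        /* exits 0 once software state agrees */
}
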
@@ -792,8 +786,8 @@ again:
        if (status)
                goto again;
 
-       perf_enable();
-
+done:
+       intel_pmu_enable_all();
        return 1;
 }
 
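
Two related cleanups in the handler: perf_disable()/perf_enable() become the PMU-local intel_pmu_disable_all()/intel_pmu_enable_all(), and the stuck-loop bail-out now shares the normal exit via the done: label instead of duplicating the re-enable. The single-exit shape, stripped to a compilable skeleton with stubbed helpers:

/* Stubs so the sketch compiles; names are illustrative, not kernel API. */
static void pmu_disable_all(void) { }
static void pmu_enable_all(void)  { }
static int  get_status(void)      { return 1; }
static int  irq_loop_stuck(void)  { return 0; }

static int handle_irq_shape(void)
{
    pmu_disable_all();

    if (!get_status()) {
        pmu_enable_all();           /* fast path: nothing pending */
        return 0;
    }

    if (irq_loop_stuck())
        goto done;                  /* was: enable + return 1, duplicated */

    /* ... ack status, handle overflowed counters, loop while pending ... */

done:
    pmu_enable_all();               /* exactly one place re-arms the PMU */
    return 1;
}

int main(void)
{
    return handle_irq_shape() ? 0 : 1;
}
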
@@ -872,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
        .max_period             = (1ULL << 31) - 1,
        .enable_bts             = intel_pmu_enable_bts,
        .disable_bts            = intel_pmu_disable_bts,
-       .get_event_constraints  = intel_get_event_constraints
+       .get_event_constraints  = intel_get_event_constraints,
+
+       .cpu_starting           = init_debug_store_on_cpu,
+       .cpu_dying              = fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
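
The descriptor also grows .cpu_starting and .cpu_dying hooks wired to init_debug_store_on_cpu/fini_debug_store_on_cpu, so debug-store setup and teardown ride the same ops table as enable_bts/disable_bts. A toy model of that lifecycle-hook pattern (all names invented):

#include <stdio.h>

struct pmu_ops {
    void (*cpu_starting)(int cpu);
    void (*cpu_dying)(int cpu);
};

static void ds_init(int cpu) { printf("init debug store on cpu %d\n", cpu); }
static void ds_fini(int cpu) { printf("fini debug store on cpu %d\n", cpu); }

static const struct pmu_ops toy_pmu = {
    .cpu_starting = ds_init,
    .cpu_dying    = ds_fini,
};

int main(void)
{
    toy_pmu.cpu_starting(0);        /* the hotplug path would invoke these */
    toy_pmu.cpu_dying(0);
    return 0;
}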