oprofile/x86: move IBS code
author		Robert Richter <robert.richter@amd.com>
		Fri, 26 Feb 2010 09:54:56 +0000 (10:54 +0100)
committer	Robert Richter <robert.richter@amd.com>
		Tue, 4 May 2010 09:35:28 +0000 (11:35 +0200)
Move code to make future changes easier. This groups all the IBS code
together.

Signed-off-by: Robert Richter <robert.richter@amd.com>
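
A note on the counter setup being relocated below: op_amd_setup_ctrs() arms
each active counter with -(u64)reset_value because AMD performance counters
count upward and raise an NMI on overflow, so the counter fires after exactly
reset_value events. The following is a minimal user-space sketch of that idiom
only; the helper names and the plain 64-bit variable standing in for a counter
MSR are illustrative assumptions, not part of this patch.

	/*
	 * Illustrative sketch, not part of the patch: arm a counter with
	 * a negative value so it overflows after exactly `count` events,
	 * mirroring the -(u64)reset_value[virt] write in op_amd_setup_ctrs().
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t perfctr;	/* hypothetical stand-in for a counter MSR */

	static void arm_counter(uint64_t count)
	{
		perfctr = -count;	/* wraps modulo 2^64, like -(u64)reset_value */
	}

	static int overflowed(uint64_t events)
	{
		perfctr += events;
		return perfctr == 0;	/* wrapped to zero: the NMI would fire here */
	}

	int main(void)
	{
		arm_counter(100000);
		printf("overflow after 100000 events: %d\n", overflowed(100000));
		return 0;
	}

As the in-code comment in the clearing loop notes, the separate -1LL write
serves the opposite purpose: it parks cleared counters with their high bits
set so the NMI handler does not falsely detect an overflow.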
arch/x86/oprofile/op_model_amd.c

index 536d0b0..e159254 100644
@@ -102,116 +102,6 @@ static u32 get_ibs_caps(void)
        return ibs_caps;
 }
 
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
-                              struct op_msrs const * const msrs)
-{
-       u64 val;
-       int i;
-
-       /* enable active counters */
-       for (i = 0; i < NUM_COUNTERS; ++i) {
-               int virt = op_x86_phys_to_virt(i);
-               if (!reset_value[virt])
-                       continue;
-               rdmsrl(msrs->controls[i].addr, val);
-               val &= model->reserved;
-               val |= op_x86_get_ctrl(model, &counter_config[virt]);
-               wrmsrl(msrs->controls[i].addr, val);
-       }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
-       int i;
-
-       for (i = 0; i < NUM_COUNTERS; ++i) {
-               if (!msrs->counters[i].addr)
-                       continue;
-               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-}
-
-static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
-       int i;
-
-       for (i = 0; i < NUM_COUNTERS; i++) {
-               if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-                       goto fail;
-               if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
-                       release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                       goto fail;
-               }
-               /* both registers must be reserved */
-               msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-               msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-               continue;
-       fail:
-               if (!counter_config[i].enabled)
-                       continue;
-               op_x86_warn_reserved(i);
-               op_amd_shutdown(msrs);
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
-                             struct op_msrs const * const msrs)
-{
-       u64 val;
-       int i;
-
-       /* setup reset_value */
-       for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-               if (counter_config[i].enabled
-                   && msrs->counters[op_x86_virt_to_phys(i)].addr)
-                       reset_value[i] = counter_config[i].count;
-               else
-                       reset_value[i] = 0;
-       }
-
-       /* clear all counters */
-       for (i = 0; i < NUM_COUNTERS; ++i) {
-               if (!msrs->controls[i].addr)
-                       continue;
-               rdmsrl(msrs->controls[i].addr, val);
-               if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
-                       op_x86_warn_in_use(i);
-               val &= model->reserved;
-               wrmsrl(msrs->controls[i].addr, val);
-               /*
-                * avoid a false detection of ctr overflows in NMI
-                * handler
-                */
-               wrmsrl(msrs->counters[i].addr, -1LL);
-       }
-
-       /* enable active counters */
-       for (i = 0; i < NUM_COUNTERS; ++i) {
-               int virt = op_x86_phys_to_virt(i);
-               if (!reset_value[virt])
-                       continue;
-
-               /* setup counter registers */
-               wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
-               /* setup control registers */
-               rdmsrl(msrs->controls[i].addr, val);
-               val &= model->reserved;
-               val |= op_x86_get_ctrl(model, &counter_config[virt]);
-               wrmsrl(msrs->controls[i].addr, val);
-       }
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -376,6 +266,116 @@ static void op_amd_stop_ibs(void)
                wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+                              struct op_msrs const * const msrs)
+{
+       u64 val;
+       int i;
+
+       /* enable active counters */
+       for (i = 0; i < NUM_COUNTERS; ++i) {
+               int virt = op_x86_phys_to_virt(i);
+               if (!reset_value[virt])
+                       continue;
+               rdmsrl(msrs->controls[i].addr, val);
+               val &= model->reserved;
+               val |= op_x86_get_ctrl(model, &counter_config[virt]);
+               wrmsrl(msrs->controls[i].addr, val);
+       }
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+       int i;
+
+       for (i = 0; i < NUM_COUNTERS; ++i) {
+               if (!msrs->counters[i].addr)
+                       continue;
+               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+       }
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+       int i;
+
+       for (i = 0; i < NUM_COUNTERS; i++) {
+               if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+                       goto fail;
+               if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+                       release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+                       goto fail;
+               }
+               /* both registers must be reserved */
+               msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+               msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+               continue;
+       fail:
+               if (!counter_config[i].enabled)
+                       continue;
+               op_x86_warn_reserved(i);
+               op_amd_shutdown(msrs);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+                             struct op_msrs const * const msrs)
+{
+       u64 val;
+       int i;
+
+       /* setup reset_value */
+       for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+               if (counter_config[i].enabled
+                   && msrs->counters[op_x86_virt_to_phys(i)].addr)
+                       reset_value[i] = counter_config[i].count;
+               else
+                       reset_value[i] = 0;
+       }
+
+       /* clear all counters */
+       for (i = 0; i < NUM_COUNTERS; ++i) {
+               if (!msrs->controls[i].addr)
+                       continue;
+               rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+                       op_x86_warn_in_use(i);
+               val &= model->reserved;
+               wrmsrl(msrs->controls[i].addr, val);
+               /*
+                * avoid a false detection of ctr overflows in NMI
+                * handler
+                */
+               wrmsrl(msrs->counters[i].addr, -1LL);
+       }
+
+       /* enable active counters */
+       for (i = 0; i < NUM_COUNTERS; ++i) {
+               int virt = op_x86_phys_to_virt(i);
+               if (!reset_value[virt])
+                       continue;
+
+               /* setup counter registers */
+               wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+               /* setup control registers */
+               rdmsrl(msrs->controls[i].addr, val);
+               val &= model->reserved;
+               val |= op_x86_get_ctrl(model, &counter_config[virt]);
+               wrmsrl(msrs->controls[i].addr, val);
+       }
+}
+
 static int op_amd_check_ctrs(struct pt_regs * const regs,
                             struct op_msrs const * const msrs)
 {
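
For readers following the move: the op_mux_switch_ctrl() /
op_x86_phys_to_virt() pair relocated above implements counter multiplexing
(CONFIG_OPROFILE_EVENT_MULTIPLEX), where more virtual event slots exist than
physical counters and a rotating offset decides which virtual event each
physical counter carries at a given moment. The sketch below illustrates that
mapping only; the NUM_COUNTERS/NUM_VIRT_COUNTERS values and the modulo scheme
are assumptions for illustration, not the kernel's actual implementation.

	/*
	 * Illustrative sketch, not kernel code: rotate a window of physical
	 * counters over a larger set of virtual counters, the idea behind
	 * op_x86_phys_to_virt() in the multiplexing path above.
	 */
	#include <stdio.h>

	#define NUM_COUNTERS		4	/* physical counters (assumed) */
	#define NUM_VIRT_COUNTERS	8	/* virtual counters (assumed) */

	static int mux_offset;	/* advanced at every switch interval */

	static int phys_to_virt(int phys)
	{
		/* one plausible mapping; the kernel's scheme may differ */
		return (phys + mux_offset) % NUM_VIRT_COUNTERS;
	}

	int main(void)
	{
		int round, i;

		for (round = 0; round < 2; round++) {
			for (i = 0; i < NUM_COUNTERS; i++)
				printf("round %d: phys %d -> virt %d\n",
				       round, i, phys_to_virt(i));
			mux_offset += NUM_COUNTERS;	/* next switch */
		}
		return 0;
	}

Each switch interval, op_mux_switch_ctrl() reprograms only the control MSRs of
counters whose current virtual slot has a nonzero reset_value, leaving the
rest untouched.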