oprofile/x86: warn user if a counter is already active
author     Robert Richter <robert.richter@amd.com>
           Tue, 23 Feb 2010 17:14:58 +0000 (18:14 +0100)
committer  Robert Richter <robert.richter@amd.com>
           Fri, 26 Feb 2010 14:14:03 +0000 (15:14 +0100)
This patch generates a warning if a counter is already active when oprofile
sets up its counters: either the counter's control MSR could not be reserved,
or the counter's enable bit is found to be already set.

Implemented for the AMD and P6 models; the P4 model is not supported.
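
For illustration only (this is a condensed restatement, not extra code in the
patch), the check added to each model's setup_ctrs path has the following
shape; all identifiers (msrs->controls[i].addr, counter_config[], rdmsrl(),
ARCH_PERFMON_EVENTSEL0_ENABLE, and the two warning helpers) appear in the
diffs below:

	/* for each counter i in the model's setup_ctrs hook */
	if (unlikely(!msrs->controls[i].addr)) {
		/* MSR not reserved for oprofile; warn from cpu #0 only */
		if (counter_config[i].enabled && !smp_processor_id())
			op_x86_warn_reserved(i);
		continue;
	}
	rdmsrl(msrs->controls[i].addr, val);
	if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)	/* already enabled? */
		op_x86_warn_in_use(i);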

Cc: Naga Chumbalkar <nagananda.chumbalkar@hp.com>
Cc: Shashi Belur <shashi-kiran.belur@hp.com>
Cc: Tony Jones <tonyj@suse.de>
Signed-off-by: Robert Richter <robert.richter@amd.com>
arch/x86/oprofile/op_model_amd.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/oprofile/op_x86_model.h

diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index a9d1947..ef9d735 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -194,9 +194,18 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* clear all counters */
        for (i = 0; i < NUM_CONTROLS; ++i) {
-               if (unlikely(!msrs->controls[i].addr))
+               if (unlikely(!msrs->controls[i].addr)) {
+                       if (counter_config[i].enabled && !smp_processor_id())
+                               /*
+                                * counter is reserved, this is on all
+                                * cpus, so report only for cpu #0
+                                */
+                               op_x86_warn_reserved(i);
                        continue;
+               }
                rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 8eb0587..c344525 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -82,9 +82,18 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* clear all counters */
        for (i = 0; i < num_counters; ++i) {
-               if (unlikely(!msrs->controls[i].addr))
+               if (unlikely(!msrs->controls[i].addr)) {
+                       if (counter_config[i].enabled && !smp_processor_id())
+                               /*
+                                * counter is reserved, this is on all
+                                * cpus, so report only for cpu #0
+                                */
+                               op_x86_warn_reserved(i);
                        continue;
+               }
                rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 7b8e75d..59fa2bd 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -57,6 +57,17 @@ struct op_x86_model_spec {
 
 struct op_counter_config;
 
+static inline void op_x86_warn_in_use(int counter)
+{
+       pr_warning("oprofile: counter #%d on cpu #%d may already be used\n",
+                  counter, smp_processor_id());
+}
+
+static inline void op_x86_warn_reserved(int counter)
+{
+       pr_warning("oprofile: counter #%d is already reserved\n", counter);
+}
+
 extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                           struct op_counter_config *counter_config);
 extern int op_x86_phys_to_virt(int phys);
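
With the format strings above, a collision shows up in the kernel log as
lines of the following form (the counter and cpu numbers here are
hypothetical):

	oprofile: counter #1 on cpu #2 may already be used
	oprofile: counter #1 is already reserved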