/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-extern atomic_t multiplex_counter;
-#endif
-
struct op_counter_config counter_config[OP_MAX_COUNTER];
/* common functions */
static DEFINE_PER_CPU(int, switch_index);
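+
+/* multiplexing is available only if the model supplies a counter-set
+ * switch handler */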
+static inline int has_mux(void)
+{
+ return !!model->switch_ctrl;
+}
+
inline int op_x86_phys_to_virt(int phys)
{
return __get_cpu_var(switch_index) + phys;
}
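+
+/* virtual counter numbers map onto the physical counters modulo the
+ * number of physical counters */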
+inline int op_x86_virt_to_phys(int virt)
+{
+ return virt % model->num_counters;
+}
+
static void nmi_shutdown_mux(void)
{
int i;
+
+ if (!has_mux())
+ return;
+
for_each_possible_cpu(i) {
kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
	}
}

static int nmi_setup_mux(void)
{
size_t multiplex_size =
sizeof(struct op_msr) * model->num_virt_counters;
int i;
+
+ if (!has_mux())
+ return 1;
+
for_each_possible_cpu(i) {
per_cpu(cpu_msrs, i).multiplex =
kmalloc(multiplex_size, GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).multiplex)
return 0;
}
+
return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
int i;
struct op_msr *multiplex = msrs->multiplex;
+ if (!has_mux())
+ return;
+
for (i = 0; i < model->num_virt_counters; ++i) {
if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(dummy);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
si += model->num_counters;
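+	/* wrap back to the first counter set when si runs past the last
+	 * virtual counter or reaches an unconfigured one */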
- if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
+ if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
per_cpu(switch_index, cpu) = 0;
else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(dummy);
}

static int nmi_switch_event(void)
{
- if (!model->switch_ctrl)
+ if (!has_mux())
return -ENOSYS; /* not implemented */
if (nmi_multiplex_on() < 0)
return -EINVAL; /* not necessary */
on_each_cpu(nmi_cpu_switch, NULL, 1);
- atomic_inc(&multiplex_counter);
-
return 0;
}
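+
+/* hook up event switching only when the model can multiplex */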
+static inline void mux_init(struct oprofile_operations *ops)
+{
+ if (has_mux())
+ ops->switch_events = nmi_switch_event;
+}
+
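+/* copy CPU 0's multiplex MSR state to the given CPU */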
+static void mux_clone(int cpu)
+{
+ if (!has_mux())
+ return;
+
+ memcpy(per_cpu(cpu_msrs, cpu).multiplex,
+ per_cpu(cpu_msrs, 0).multiplex,
+ sizeof(struct op_msr) * model->num_virt_counters);
+}
+
#else
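+
+/* no-op stubs keep the callers free of #ifdefs when multiplexing is
+ * compiled out */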
inline int op_x86_phys_to_virt(int phys) { return phys; }
+inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
+static inline void mux_init(struct oprofile_operations *ops) { }
+static void mux_clone(int cpu) { }
#endif
/* Assume saved/restored counters are the same on all CPUs */
model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
for_each_possible_cpu(cpu) {
- if (cpu != 0) {
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- memcpy(per_cpu(cpu_msrs, cpu).multiplex,
- per_cpu(cpu_msrs, 0).multiplex,
- sizeof(struct op_msr) * model->num_virt_counters);
-#endif
- }
+ if (!cpu)
+ continue;
+
+ memcpy(per_cpu(cpu_msrs, cpu).counters,
+ per_cpu(cpu_msrs, 0).counters,
+ sizeof(struct op_msr) * model->num_counters);
+
+ memcpy(per_cpu(cpu_msrs, cpu).controls,
+ per_cpu(cpu_msrs, 0).controls,
+ sizeof(struct op_msr) * model->num_controls);
+
+ mux_clone(cpu);
}
on_each_cpu(nmi_cpu_setup, NULL, 1);
nmi_enabled = 1;
struct dentry *dir;
char buf[4];
-#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
/* quick little hack to _not_ expose a counter if it is not
	 * available for use. This should protect the userspace app.
* NOTE: assumes 1:1 mapping here (that counters are organized
* sequentially in their struct assignment).
*/
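+	/* map the virtual counter to its physical counter before checking
+	 * availability */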
- if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
+ if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
continue;
-#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */
snprintf(buf, sizeof(buf), "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
case 15: case 23:
*cpu_type = "i386/core_2";
break;
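+	/* 0x2e (46): Nehalem-EX, set up like Core i7 */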
+ case 0x2e:
case 26:
spec = &op_arch_perfmon_spec;
*cpu_type = "i386/core_i7";
ops->start = nmi_start;
ops->stop = nmi_stop;
ops->cpu_type = cpu_type;
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- ops->switch_events = nmi_switch_event;
-#endif
if (model->init)
ret = model->init(ops);
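+	/* default to the physical counters when the model sets no virtual
+	 * counter count */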
if (!model->num_virt_counters)
model->num_virt_counters = model->num_counters;
+ mux_init(ops);
+
init_sysfs();
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");