/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 */
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"
static int num_counters = 2;
static int counter_width = 32;

#define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL<<32)|(1ULL<<21))

static u64 *reset_value;
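
/*
 * Try to reserve each perfctr/evntsel MSR pair via the NMI watchdog
 * allocator and record its address; an address of 0 marks a counter
 * we failed to reserve.
 */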
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < num_counters; i++) {
		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}
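
/*
 * Program the event selectors from counter_config: detect the real
 * counter width on architectural perfmon CPUs, disable any counter
 * already in use, then arm the enabled ones.
 */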
static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
			    struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value) {
		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
			current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}
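	/*
	 * Masking with model->reserved below keeps only the MSR bits
	 * we must not modify and clears every configuration bit, so a
	 * counter left enabled by firmware or an earlier user is
	 * switched off first.
	 */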
	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->controls[i].addr)) {
			if (counter_config[i].enabled && !smp_processor_id())
				/*
				 * counter is reserved, this is on all
				 * cpus, so report only for cpu #0
				 */
				op_x86_warn_reserved(i);
			continue;
		}
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}
	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}
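	/*
	 * Counters are armed with the two's complement of the sample
	 * count: starting from -count, a counter overflows after
	 * exactly 'count' events and raises the PMU interrupt.
	 */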
	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled && msrs->counters[i].addr) {
			reset_value[i] = counter_config[i].count;
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;
			val |= op_x86_get_ctrl(model, &counter_config[i]);
			wrmsrl(msrs->controls[i].addr, val);
		} else {
			reset_value[i] = 0;
		}
	}
}
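
/*
 * NMI handler: a counter has overflowed when its sign bit (bit
 * counter_width - 1) is clear, i.e. the value has wrapped past zero.
 * For each such counter, log a sample and rearm it.
 */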
static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * This can happen if perf counters are in use when
	 * we steal the die notifier NMI.
	 */
	if (unlikely(!reset_value))
		goto out;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

out:
	/*
	 * Only P6-based Pentium M needs to re-unmask the apic vector,
	 * but it doesn't hurt other P6 variants.
	 */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/*
	 * We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it
	 * overflowed; when the interrupt for this counter then
	 * arrives we don't find a counter that has overflowed, so we
	 * would return 0 and get dazed + confused. Instead we always
	 * assume we found an overflow. This sucks.
	 */
	return 1;
}
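
/*
 * Set the enable bit in each active counter's event selector; the
 * selectors themselves were programmed in ppro_setup_ctrs().
 */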
static void ppro_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, val);
			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(msrs->controls[i].addr, val);
		}
	}
}
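
/*
 * Clear the enable bit in each active counter's event selector,
 * leaving the rest of the configuration intact.
 */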
static void ppro_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}
}
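
/*
 * Release the MSRs reserved in ppro_fill_in_addresses() and free the
 * per-counter reset values.
 */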
static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}
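
/*
 * Classic P6 family spec: two counters at fixed MSR addresses,
 * 32 bits wide by default.
 */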
struct op_x86_model_spec op_ppro_spec = {
	.num_counters		= 2,
	.num_controls		= 2,
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.fill_in_addresses	= &ppro_fill_in_addresses,
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};
/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */
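
/*
 * Query CPUID 0xA for the number of general-purpose counters and
 * size the arch perfmon spec accordingly.
 */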
static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
		current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_events = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_events;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}
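
/* Runtime init hook: just sizes the counter arrays from CPUID. */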
static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}
struct op_x86_model_spec op_arch_perfmon_spec = {
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.init			= &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses	= &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};