/*
 * @file op_model_ppro.h
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 */
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/intel_arch_perfmon.h>
#include "op_x86_model.h"
#include "op_counter.h"
static int num_counters = 2;
static int counter_width = 32;
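
/*
 * Counters are programmed with the two's complement of the desired
 * sample count (see the wrmsrl(..., -reset_value[i]) writes below), so
 * they count up toward zero and the sign bit clears on overflow.
 * CTR_OVERFLOWED exploits this: a clear bit (counter_width - 1) means
 * the counter has wrapped and a sample should be taken.
 */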
#define CTR_IS_RESERVED(msrs, c)	(msrs->counters[(c)].addr ? 1 : 0)
#define CTR_OVERFLOWED(n)	(!((n) & (1ULL << (counter_width - 1))))

#define CTRL_IS_RESERVED(msrs, c)	(msrs->controls[(c)].addr ? 1 : 0)
#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
#define CTRL_SET_ACTIVE(n)	(n |= (1 << 22))
#define CTRL_SET_INACTIVE(n)	(n &= ~(1 << 22))
#define CTRL_CLEAR(x)		(x &= (1 << 21))
#define CTRL_SET_ENABLE(val)	(val |= 1 << 20)
#define CTRL_SET_USR(val, u)	(val |= ((u & 1) << 16))
#define CTRL_SET_KERN(val, k)	(val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m)	(val |= (m << 8))
#define CTRL_SET_EVENT(val, e)	(val |= e)
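
/*
 * The CTRL_* macros above manipulate fields of the PERFEVTSEL (event
 * select) MSR: event code in bits 7:0, unit mask in bits 15:8, USR
 * (bit 16), OS (bit 17), INT/APIC interrupt enable (bit 20), and the
 * EN bit (22) that CTRL_SET_ACTIVE/CTRL_SET_INACTIVE toggle to start
 * and stop counting. See the Intel SDM performance monitoring chapter
 * for the full register layout.
 */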
static u64 *reset_value;

static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < num_counters; i++) {
		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}
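
/*
 * An addr of 0 above marks an MSR whose reservation via
 * reserve_perfctr_nmi()/reserve_evntsel_nmi() failed; the
 * CTR_IS_RESERVED/CTRL_IS_RESERVED checks below skip such counters
 * rather than touching hardware another user owns.
 */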

static void ppro_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value) {
		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
				current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR(low);
		CTRL_WRITE(low, high, msrs, i);
	}

	/*
	 * Avoid a false detection of counter overflows in the NMI
	 * handler: writing -1 (all bits set) keeps the sign bit set,
	 * so CTR_OVERFLOWED stays false until a counter is armed below.
	 */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			wrmsrl(msrs->counters[i].addr, -reset_value[i]);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT(low, counter_config[i].event);
			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			oprofile_add_sample(regs, i);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
		}
	}

	/*
	 * Only the P6 based Pentium M needs to re-unmask the APIC
	 * vector, but it doesn't hurt other P6 variants.
	 */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/*
	 * We can't tell whether we really handled an interrupt: we may
	 * have caught a *second* counter just after it overflowed; the
	 * interrupt for this counter then arrives, we find no counter
	 * that has overflowed, and returning 0 would leave the NMI
	 * handler dazed and confused. Instead, always claim we found an
	 * overflow. This sucks.
	 */
	return 1;
}

static void ppro_start(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			CTRL_READ(low, high, msrs, i);
			CTRL_SET_ACTIVE(low);
			CTRL_WRITE(low, high, msrs, i);
		}
	}
}

static void ppro_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_SET_INACTIVE(low);
		CTRL_WRITE(low, high, msrs, i);
	}
}

static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}

struct op_x86_model_spec const op_ppro_spec = {
	.num_counters = 2,
	.num_controls = 2,
	.fill_in_addresses = &ppro_fill_in_addresses,
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */
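
/*
 * CPUID 0xA returns the architectural perfmon parameters in EAX:
 * version_id in bits 7:0, num_counters (general-purpose counters per
 * logical CPU) in bits 15:8, and the bit_width of those counters in
 * bits 23:16. union cpuid10_eax simply overlays that layout, as
 * decoded below.
 */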

static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/*
	 * Workaround for BIOS bugs on family 6 model 15 (Core2) parts
	 * that report version_id 0; values taken from perfmon2.
	 */
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
		current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_counters;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
	.init = &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses = &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};
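
/*
 * op_arch_perfmon_spec is intended as a generic fallback: the oprofile
 * NMI setup code (nmi_int.c in kernels of this era) can select it on
 * any CPU advertising architectural perfmon (cpu_has_arch_perfmon)
 * when no model-specific spec applies, reusing the ppro_* helpers
 * above with counter counts discovered at runtime.
 */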