/**
 * P4 model-specific MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Graydon Hoare
 */
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/nmi.h>
#include <asm/msr.h>
#include <asm/fixmap.h>
#include <asm/apic.h>

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_COUNTERS_NON_HT 8
#define NUM_ESCRS_NON_HT 45
#define NUM_CCCRS_NON_HT 18
#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT)

#define NUM_COUNTERS_HT2 4
#define NUM_ESCRS_HT2 23
#define NUM_CCCRS_HT2 9
#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)

static unsigned int num_counters = NUM_COUNTERS_NON_HT;
static unsigned int num_controls = NUM_CONTROLS_NON_HT;
/* this has to be checked dynamically since the
   hyper-threadedness of a chip is discovered at
   kernel boot time. */
static inline void setup_num_counters(void)
{
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2) {
		num_counters = NUM_COUNTERS_HT2;
		num_controls = NUM_CONTROLS_HT2;
	}
#endif
}

static inline int addr_increment(void)
{
#ifdef CONFIG_SMP
	return smp_num_siblings == 2 ? 2 : 1;
#else
	return 1;
#endif
}
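/*
 * Note on the stride: with hyper-threading active, each pair of per-group
 * MSRs (ESCR0/ESCR1, even/odd CCCRs and counters) is split between the two
 * logical CPUs, so the register walks below start at the base address plus
 * this CPU's "stagger" and step by 2 to stay on this thread's half of each
 * pair.
 */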
/* tables to simulate simplified hardware view of p4 registers */
struct p4_counter_binding {
	int virt_counter;
	int counter_address;
	int cccr_address;
};

struct p4_event_binding {
	int escr_select;  /* value to put in CCCR */
	int event_select; /* value to put in ESCR */
	struct {
		int virt_counter; /* for this counter... */
		int escr_address; /* use this ESCR       */
	} bindings[2];
};

/* nb: these CTR_* defines are a duplicate of defines in
   event/i386.p4*events. */
#define CTR_BPU_0      (1 << 0)
#define CTR_MS_0       (1 << 1)
#define CTR_FLAME_0    (1 << 2)
#define CTR_IQ_4       (1 << 3)
#define CTR_BPU_2      (1 << 4)
#define CTR_MS_2       (1 << 5)
#define CTR_FLAME_2    (1 << 6)
#define CTR_IQ_5       (1 << 7)
static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
	{ CTR_BPU_0,   MSR_P4_BPU_PERFCTR0,   MSR_P4_BPU_CCCR0 },
	{ CTR_MS_0,    MSR_P4_MS_PERFCTR0,    MSR_P4_MS_CCCR0 },
	{ CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
	{ CTR_IQ_4,    MSR_P4_IQ_PERFCTR4,    MSR_P4_IQ_CCCR4 },
	{ CTR_BPU_2,   MSR_P4_BPU_PERFCTR2,   MSR_P4_BPU_CCCR2 },
	{ CTR_MS_2,    MSR_P4_MS_PERFCTR2,    MSR_P4_MS_CCCR2 },
	{ CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 },
	{ CTR_IQ_5,    MSR_P4_IQ_PERFCTR5,    MSR_P4_IQ_CCCR5 }
};

#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
/* p4 event codes in libop/op_event.h are indices into this table. */

static struct p4_event_binding p4_events[NUM_EVENTS] = {

	{ /* BRANCH_RETIRED */
		{ {CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
	{ /* MISPRED_BRANCH_RETIRED */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
	{ /* TC_DELIVER_MODE */
		{ { CTR_MS_0, MSR_P4_TC_ESCR0},
		  { CTR_MS_2, MSR_P4_TC_ESCR1} }
	{ /* BPU_FETCH_REQUEST */
		{ { CTR_BPU_0, MSR_P4_BPU_ESCR0},
		  { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
	{ /* ITLB_REFERENCE */
		{ { CTR_BPU_0, MSR_P4_ITLB_ESCR0},
		  { CTR_BPU_2, MSR_P4_ITLB_ESCR1} }
	{ /* MEMORY_CANCEL */
		{ { CTR_FLAME_0, MSR_P4_DAC_ESCR0},
		  { CTR_FLAME_2, MSR_P4_DAC_ESCR1} }
	{ /* MEMORY_COMPLETE */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
	{ /* LOAD_PORT_REPLAY */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
	{ /* STORE_PORT_REPLAY */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
	{ /* MOB_LOAD_REPLAY */
		{ { CTR_BPU_0, MSR_P4_MOB_ESCR0},
		  { CTR_BPU_2, MSR_P4_MOB_ESCR1} }
	{ /* PAGE_WALK_TYPE */
		{ { CTR_BPU_0, MSR_P4_PMH_ESCR0},
		  { CTR_BPU_2, MSR_P4_PMH_ESCR1} }
	{ /* BSQ_CACHE_REFERENCE */
		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},
		  { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
	{ /* IOQ_ALLOCATION */
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
	{ /* IOQ_ACTIVE_ENTRIES */
		{ { CTR_BPU_2, MSR_P4_FSB_ESCR1},
	{ /* FSB_DATA_ACTIVITY */
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
	{ /* BSQ_ALLOCATION */
		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},
	{ /* BSQ_ACTIVE_ENTRIES */
		{ { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
	{ /* SSE_INPUT_ASSIST */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* PACKED_SP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* PACKED_DP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* SCALAR_SP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* SCALAR_DP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* 64BIT_MMX_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* 128BIT_MMX_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* X87_SIMD_MOVES_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
	{ /* MACHINE_CLEAR */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
	{ /* GLOBAL_POWER_EVENTS */
		0x06, 0x13 /* older manual says 0x05, newer 0x13 */,
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
		{ { CTR_MS_0, MSR_P4_MS_ESCR0},
		  { CTR_MS_2, MSR_P4_MS_ESCR1} }
	{ /* UOP_QUEUE_WRITES */
		{ { CTR_MS_0, MSR_P4_MS_ESCR0},
		  { CTR_MS_2, MSR_P4_MS_ESCR1} }
	{ /* FRONT_END_EVENT */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
	{ /* EXECUTION_EVENT */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
	{ /* INSTR_RETIRED */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
		{ { CTR_IQ_4, MSR_P4_RAT_ESCR0},
		  { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
	{ /* RETIRED_MISPRED_BRANCH_TYPE */
		{ { CTR_MS_0, MSR_P4_TBPU_ESCR0},
		  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
	{ /* RETIRED_BRANCH_TYPE */
		{ { CTR_MS_0, MSR_P4_TBPU_ESCR0},
		  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
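/*
 * Note: as used in pmc_setup_one_p4_counter() below, user-visible event
 * codes index this table 1-based: code N selects p4_events[N - 1], and
 * codes outside 1..NUM_EVENTS are rejected as out of range.
 */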
#define MISC_PMC_ENABLED_P(x) ((x) & 1 << 7)

#define ESCR_RESERVED_BITS 0x80000003
#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1)))
#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
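/*
 * Explanatory note (derived from the macros above): within an ESCR the
 * event select lives in bits 30:25, the 16-bit event mask in bits 24:9,
 * the USR/OS enable bits for logical CPU 0 at bits 2 and 3, and the
 * corresponding bits for logical CPU 1 at bits 0 and 1.  ESCR_CLEAR keeps
 * only the bits in ESCR_RESERVED_BITS before new fields are ORed in.
 */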
#define CCCR_RESERVED_BITS 0x38030FFF
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1<<26))
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
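/*
 * Explanatory note (derived from the macros above): in a CCCR the ESCR
 * select field occupies bits 15:13, bit 12 enables the counter, bits 26
 * and 27 request a PMI on overflow for logical CPU 0 and 1 respectively,
 * and bit 31 is the overflow flag.  The "required" value 0x00030000 sets
 * bits 17:16, which must be written as ones on this hardware.
 */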
#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
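/*
 * Explanatory note: CTR_WRITE programs the counter with the negated count
 * (sign-extended via the -1 high word), so the counter counts up and
 * overflows after roughly "count" events.  CTR_OVERFLOW_P treats a clear
 * bit 31 in the low word as an overflow that the CCCR OVF flag may have
 * missed; see the quirk comment in p4_check_ctrs() below.
 */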
/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
#endif
	return 0;
}

/* finally, mediate access to a real hardware counter
   by passing a "virtual" counter number to this macro,
   along with your stagger setting. */
#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))
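/*
 * Worked example (for illustration): on a hyper-threaded CPU num_counters
 * is 4, so a thread with stagger 0 maps virtual counters 0-3 onto entries
 * 0-3 of p4_counters[], while its sibling with stagger 1 maps the same
 * virtual counters onto entries 4-7.  On non-HT CPUs the stagger is always
 * 0 and the mapping is the identity.
 */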
static unsigned long reset_value[NUM_COUNTERS_NON_HT];

static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, cccraddr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* initialize some registers */
	for (i = 0; i < num_counters; ++i)
		msrs->counters[i].addr = 0;
	for (i = 0; i < num_controls; ++i)
		msrs->controls[i].addr = 0;

	/* the counter & cccr registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
		cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
		if (reserve_perfctr_nmi(addr)) {
			msrs->counters[i].addr = addr;
			msrs->controls[i].addr = cccraddr;
		}
	}

	/* 43 ESCR registers in three or four discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* no IQ_ESCR0/1 on some models; reserve BSU_ESCR0/1 a second time
	 * instead, to avoid a special case in nmi_{save|restore}_registers() */
	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */

	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else {
		/* and two copies of the second to the odd thread,
		   for the 22nd and 23rd control registers */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		}
	}
}
static void pmc_setup_one_p4_counter(unsigned int ctr)
{
	int i;
	int const maxbind = 2;
	unsigned int cccr = 0;
	unsigned int escr = 0;
	unsigned int high = 0;
	unsigned int counter_bit;
	struct p4_event_binding *ev = NULL;
	unsigned int stag;

	stag = get_stagger();

	/* convert from counter *number* to counter *bit* */
	counter_bit = 1 << VIRT_CTR(stag, ctr);

	/* find our event binding structure. */
	if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
		printk(KERN_ERR
		       "oprofile: P4 event code 0x%lx out of range\n",
		       counter_config[ctr].event);
		return;
	}

	ev = &(p4_events[counter_config[ctr].event - 1]);

	for (i = 0; i < maxbind; i++) {
		if (ev->bindings[i].virt_counter & counter_bit) {

			/* modify ESCR */
			ESCR_READ(escr, high, ev, i);
			ESCR_CLEAR(escr);
			if (stag == 0) {
				ESCR_SET_USR_0(escr, counter_config[ctr].user);
				ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
			} else {
				ESCR_SET_USR_1(escr, counter_config[ctr].user);
				ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
			}
			ESCR_SET_EVENT_SELECT(escr, ev->event_select);
			ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
			ESCR_WRITE(escr, high, ev, i);

			/* modify CCCR */
			CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
			CCCR_CLEAR(cccr);
			CCCR_SET_REQUIRED_BITS(cccr);
			CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
			if (stag == 0)
				CCCR_SET_PMI_OVF_0(cccr);
			else
				CCCR_SET_PMI_OVF_1(cccr);
			CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
			return;
		}
	}

	printk(KERN_ERR
	       "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
	       counter_config[ctr].event, stag, ctr);
}
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0 ; i < num_counters ; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (i = num_counters; i < num_controls; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		wrmsr(msrs->controls[i].addr, 0, 0);
	}

	/* setup all counters */
	for (i = 0 ; i < num_counters ; ++i) {
		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
static int p4_check_ctrs(struct pt_regs * const regs,
			 struct op_msrs const * const msrs)
{
	unsigned long ctr, low, high, stag, real;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {

		if (!reset_value[i])
			continue;

		/*
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in intel doc 249199-029, pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
static void p4_start(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_ENABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}

static void p4_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_DISABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}
static void p4_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0 ; i < num_counters ; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(msrs->counters[i].addr);
	}
	/*
	 * some of the control registers are specially reserved in
	 * conjunction with the counter registers (hence the starting offset).
	 * This saves a few bits.
	 */
	for (i = num_counters ; i < num_controls ; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(msrs->controls[i].addr);
	}
}
struct op_x86_model_spec const op_p4_ht2_spec = {
	.num_counters = NUM_COUNTERS_HT2,
	.num_controls = NUM_CONTROLS_HT2,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop,
	.shutdown = &p4_shutdown
};

struct op_x86_model_spec const op_p4_spec = {
	.num_counters = NUM_COUNTERS_NON_HT,
	.num_controls = NUM_CONTROLS_NON_HT,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop,
	.shutdown = &p4_shutdown
};