/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
extern atomic_t multiplex_counter;
#endif

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}

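/*
 * Worked example (editorial sketch; the config values are hypothetical):
 * event = 0x13e, unit_mask = 0x01, user = kernel = 1, with a model
 * event_mask covering bits 11:0, the encoding above yields:
 *
 *   val[7:0]   = 0x3e  event select, low byte
 *   val[15:8]  = 0x01  unit mask
 *   val[16]    = 1     ARCH_PERFMON_EVENTSEL_USR (count in user mode)
 *   val[17]    = 1     ARCH_PERFMON_EVENTSEL_OS  (count in kernel mode)
 *   val[20]    = 1     ARCH_PERFMON_EVENTSEL_INT (interrupt on overflow)
 *   val[35:32] = 0x1   event select bits 11:8, via (event & 0x0F00) << 24
 *
 * The event bits above bit 31 only exist on CPUs with an extended
 * event-select layout (e.g. AMD family 10h); the enable bit is set
 * later by the model's setup/start code.
 */
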
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;
        int cpu = smp_processor_id();

        switch (val) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

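/*
 * Editorial note: NOTIFY_STOP claims the NMI for the profiler, so the
 * remaining die-notifier handlers (including the unknown-NMI path) are
 * skipped; any other event value falls through as NOTIFY_DONE.
 */
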
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

inline int op_x86_phys_to_virt(int phys)
{
        return __get_cpu_var(switch_index) + phys;
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }

#endif

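/*
 * Editorial sketch of the mapping (counter counts hypothetical): with 4
 * physical and 8 virtual counters, switch_index on a CPU alternates
 * between 0 and 4, so physical counters 0..3 stand in for virtual
 * counters 0..3 in one round and 4..7 in the next. Without multiplexing
 * the mapping is the identity.
 */
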
static void free_msrs(void)
{
        int i;

        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
#endif
        }
}

static int allocate_msrs(void)
{
        int success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        size_t multiplex_size = sizeof(struct op_msr) * model->num_virt_counters;
#endif
        int i;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters) {
                        success = 0;
                        break;
                }
                per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
                }
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                per_cpu(cpu_msrs, i).multiplex =
                        kmalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex) {
                        success = 0;
                        break;
                }
#endif
        }

        if (!success)
                free_msrs();

        return success;
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].addr  = 0;
                        multiplex[i].saved = 0;
                }
        }

        per_cpu(switch_index, cpu) = 0;
}

#else

static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }

#endif

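/*
 * Editorial note: the hardware counters count up and raise an NMI on
 * overflow, so a sampling period of N is armed as the two's complement
 * -N; e.g. count = 100000 is stored as 0xfffffffffffe7960. That is what
 * the -(u64)counter_config[i].count above encodes.
 */
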
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = 2
};

static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        err = register_die_notifier(&profile_exceptions_nb);
        if (err) {
                free_msrs();
                return err;
        }

        /* We need to serialize save and setup for HT because the subsets
         * of MSRs are distinct for the save and setup operations
         */

        /* Assume saved/restored counters are the same on all CPUs */
        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        for_each_possible_cpu(cpu) {
                if (cpu != 0) {
                        memcpy(per_cpu(cpu_msrs, cpu).counters,
                               per_cpu(cpu_msrs, 0).counters,
                               sizeof(struct op_msr) * model->num_counters);

                        memcpy(per_cpu(cpu_msrs, cpu).controls,
                               per_cpu(cpu_msrs, 0).controls,
                               sizeof(struct op_msr) * model->num_controls);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
                               per_cpu(cpu_msrs, 0).multiplex,
                               sizeof(struct op_msr) * model->num_virt_counters);
#endif
                }
        }
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        return 0;
}

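/*
 * Editorial note: fill_in_addresses() runs once against CPU 0 and the
 * resulting MSR address tables are copied to all other CPUs, which is
 * valid only under the homogeneous-CPU assumption stated above; the
 * counter contents themselves are still saved and restored per CPU.
 */
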
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (multiplex[virt].addr)
                        rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (multiplex[virt].addr)
                        wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
}

#endif

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /* restoring APIC_LVTPC can trigger an apic error because the delivery
         * mode and vector nr combination can be illegal. That's by design: on
         * power-on the apic lvt entries contain a zero vector nr, which is
         * legal only for NMI delivery mode. So inhibit apic errors before
         * restoring the lvtpc
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        per_cpu(switch_index, cpu) = 0;
#endif
}

static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        nmi_enabled = 0;
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->start(msrs);
}

static int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->stop(msrs);
}

static void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 1);
}

static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
                /* quick little hack to _not_ expose a counter if it is not
                 * available for use. This should protect the userspace app.
                 * NOTE: assumes 1:1 mapping here (that counters are organized
                 * sequentially in their struct assignment).
                 */
                if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
                        continue;
#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

                snprintf(buf, sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}

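/*
 * Resulting oprofilefs layout (illustrative), one directory per virtual
 * counter under the mount point (conventionally /dev/oprofile):
 *
 *   0/enabled  0/event  0/count  0/unit_mask  0/kernel  0/user
 *   1/enabled  ...
 */
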
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set; >= guards against indexing counter_config[]
         * out of bounds when si lands exactly on num_virt_counters */
        si += model->num_counters;
        if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

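/*
 * Editorial example (counter counts hypothetical): with 4 physical and 8
 * configured virtual counters, switch_index advances 0 -> 4 -> 0 -> ...,
 * so each switch exposes the next bank of model->num_counters virtual
 * counters while sampling is briefly stopped.
 */
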
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!model->switch_ctrl)
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        on_each_cpu(nmi_cpu_switch, NULL, 1);

        atomic_inc(&multiplex_counter);

        return 0;
}

#endif

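/*
 * Editorial note: a non-zero count at index model->num_counters (the
 * first slot past the physical bank) means the user configured more
 * events than there are hardware counters, which is exactly when
 * multiplexing pays off.
 */
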
#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};
#endif

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (!error)
                error = sysdev_register(&device_oprofile);
        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

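/*
 * Usage note (editorial): the parameter is set on the kernel command
 * line or at module load time, e.g.
 *
 *   oprofile.cpu_type=arch_perfmon
 *
 * which forces the generic architectural-perfmon driver instead of a
 * CPU-specific model on capable Intel hardware.
 */
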
static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec const *spec = &op_ppro_spec;  /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15:
        case 23:
                *cpu_type = "i386/core_2";
                break;
        case 26:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 28:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

/* in order to get sysfs right */
static int using_nmi;

int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                        /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                        /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

#ifdef CONFIG_SMP
        register_cpu_notifier(&oprofile_cpu_nb);
#endif
        /* default values, can be overwritten by model */
        ops->create_files       = nmi_create_files;
        ops->setup              = nmi_setup;
        ops->shutdown           = nmi_shutdown;
        ops->start              = nmi_start;
        ops->stop               = nmi_stop;
        ops->cpu_type           = cpu_type;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        ops->switch_events      = nmi_switch_event;
#endif

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

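/*
 * Editorial note: op_nmi_init() installs the generic NMI-driven
 * callbacks; model->init() may then override individual hooks with
 * CPU-specific versions. Lifecycle sketch: setup -> start -> (NMIs
 * deliver samples via profile_exceptions_notify) -> stop -> shutdown.
 */
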
void op_nmi_exit(void)
{
        if (using_nmi) {
                exit_sysfs();
#ifdef CONFIG_SMP
                unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
        }
        if (model->exit)
                model->exit();
}