/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
extern atomic_t multiplex_counter;
#endif

struct op_counter_config counter_config[OP_MAX_COUNTER];
/* common functions */
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (event & 0x0F00) << 24;

	return val;
}
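
/*
 * Worked example (editorial illustration, not from the original
 * source): for event = 0x76, unit_mask = 0, user = kernel = 1 the
 * result is INT (bit 20) | USR (bit 16) | OS (bit 17) | 0x76, i.e.
 * the event select number in val[7:0], the unit mask in val[15:8],
 * and the extended event bits 8-11 shifted up into val[35:32] for
 * models (such as AMD's) whose event numbers are wider than 8 bits.
 */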
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

inline int op_x86_phys_to_virt(int phys)
{
	return __get_cpu_var(switch_index) + phys;
}
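
/*
 * Editorial illustration: with 4 physical counters and switch_index
 * currently 4, physical counter 0 backs virtual counter 4, physical
 * counter 1 backs virtual counter 5, and so on; with switch_index 0
 * the mapping is the identity.
 */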
static void nmi_shutdown_mux(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kmalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}
	return 1;
}
#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }

#endif
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			return 0;
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			return 0;
	}

	return 1;
}
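
/*
 * Editorial note: allocate_msrs() and nmi_setup_mux() return 1 on
 * success and 0 on failure; nmi_setup() below maps a 0 result to
 * -ENOMEM.
 */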
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].addr  = 0;
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}
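
/*
 * Editorial note: the counter is seeded with the two's complement of
 * the requested count, so it overflows (raising the profiling NMI)
 * after exactly 'count' events; e.g. count = 100000 seeds the shadow
 * value with (u64)-100000. How many of the bits the hardware actually
 * implements is model-specific.
 */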
#else

static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }

#endif
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 2
};
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		err = -ENOMEM;
	else if (!nmi_setup_mux())
		err = -ENOMEM;
	else
		err = register_die_notifier(&profile_exceptions_nb);

	if (err) {
		free_msrs();
		nmi_shutdown_mux();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
		memcpy(per_cpu(cpu_msrs, cpu).multiplex,
		       per_cpu(cpu_msrs, 0).multiplex,
		       sizeof(struct op_msr) * model->num_virt_counters);
#endif
	}
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
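
/*
 * Editorial summary of the sequence above: allocate the per-CPU MSR
 * shadow arrays, hook the die notifier, resolve the MSR addresses once
 * on CPU 0, copy them to all other CPUs, then program every CPU's
 * counters and LVTPC via an IPI to nmi_cpu_setup().
 */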
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (multiplex[virt].addr)
			rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (multiplex[virt].addr)
			wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
	}
}

#endif
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
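
/*
 * Editorial note: this mirrors nmi_cpu_save_registers() above; entries
 * whose address is zero were never saved and are skipped here as well.
 */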
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power-on the apic lvt entries contain a zero vector nr, which is
	 * legal only for NMI delivery mode. So inhibit apic errors before
	 * restoring the lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	nmi_shutdown_mux();
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
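
/*
 * Editorial note on teardown order: clear nmi_enabled, restore each
 * CPU's registers and LVTPC via IPI, unhook the die notifier, free the
 * multiplex arrays, run the model's shutdown hook, then free the MSR
 * shadow arrays.
 */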
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect the userspace app.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;
#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}
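
/*
 * Usage sketch (editorial, assuming oprofilefs is mounted at
 * /dev/oprofile as the oprofile daemon does): each counter appears as
 * a numbered directory of writable files, e.g.
 *
 *	echo 1      > /dev/oprofile/0/enabled
 *	echo 0x76   > /dev/oprofile/0/event	# event select number
 *	echo 100000 > /dev/oprofile/0/count	# events per sample
 *
 * The written values land in counter_config[] above.
 */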
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
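
/*
 * Worked example (editorial): with num_counters = 4 physical and
 * num_virt_counters = 8 virtual counters, switch_index rotates
 * 0 -> 4 -> 0 -> ..., so physical counter i alternately backs virtual
 * counters i and i + 4 (see op_x86_phys_to_virt() above).
 */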
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
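
/*
 * Editorial note: counters are assigned in order, so a nonzero count
 * on the first virtual counter past the physical set (index
 * num_counters) means more events are configured than the hardware
 * can hold at once: return 0 ("multiplexing needed"), else -EINVAL.
 */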
static int nmi_switch_event(void)
{
	if (!model->switch_ctrl)
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	on_each_cpu(nmi_cpu_switch, NULL, 1);

	atomic_inc(&multiplex_counter);

	return 0;
}

#endif
#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
#endif
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};
static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}
#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "arch_perfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec const *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 28:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
/* in order to get sysfs right */
static int using_nmi;
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

#ifdef CONFIG_SMP
	register_cpu_notifier(&oprofile_cpu_nb);
#endif
	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	ops->switch_events	= nmi_switch_event;
#endif

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
#ifdef CONFIG_SMP
		unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
	}
	if (model->exit)
		model->exit();
}