Revert "Oprofile Multiplexing Patch"
arch/x86/oprofile/nmi_int.c
/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

static int nmi_start(void);
static void nmi_stop(void);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        if (nmi_enabled == 1)
                nmi_stop();
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_start();
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (!error)
                error = sysdev_register(&device_oprofile);
        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

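/*
 * NMI die-notifier callback: on DIE_NMI, hand this CPU's MSRs to the model so
 * it can check the counters and record samples; a non-zero return from
 * check_ctrs means the NMI was ours, so claim it with NOTIFY_STOP.
 */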
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;
        int cpu = smp_processor_id();

        switch (val) {
        case DIE_NMI:
                if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

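/* Snapshot the current counter and control MSR values into this CPU's
 * shadow arrays so they can be restored at shutdown. */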
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrs; ++i) {
                if (counters[i].addr) {
                        rdmsr(counters[i].addr,
                                counters[i].saved.low,
                                counters[i].saved.high);
                }
        }

        for (i = 0; i < nr_ctrls; ++i) {
                if (controls[i].addr) {
                        rdmsr(controls[i].addr,
                                controls[i].saved.low,
                                controls[i].saved.high);
                }
        }
}

static void nmi_save_registers(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
}

static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
        }
}

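/* Allocate per-CPU shadow arrays for the counter and control MSRs, sized
 * from the model; on any allocation failure, release everything again. */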
static int allocate_msrs(void)
{
        int success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                                GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters) {
                        success = 0;
                        break;
                }
                per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
                                                                GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
                }
        }

        if (!success)
                free_msrs();

        return success;
}

static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = 0
};

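/*
 * Global setup: allocate the MSR shadow arrays, hook into the NMI die chain,
 * fill in the MSR addresses once on CPU 0 and copy them to all other CPUs,
 * then save the current hardware state and program the counters on each CPU.
 */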
static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        err = register_die_notifier(&profile_exceptions_nb);
        if (err) {
                free_msrs();
                return err;
        }

        /* We need to serialize save and setup for HT because the subset
         * of MSRs is distinct for the save and setup operations.
         */

        /* Assume saved/restored counters are the same on all CPUs */
        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        for_each_possible_cpu(cpu) {
                if (cpu != 0) {
                        memcpy(per_cpu(cpu_msrs, cpu).counters,
                                per_cpu(cpu_msrs, 0).counters,
                                sizeof(struct op_msr) * model->num_counters);

                        memcpy(per_cpu(cpu_msrs, cpu).controls,
                                per_cpu(cpu_msrs, 0).controls,
                                sizeof(struct op_msr) * model->num_controls);
                }
        }
        on_each_cpu(nmi_save_registers, NULL, 1);
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        return 0;
}

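/* Write the values saved at setup time back into the control and counter
 * MSRs (controls first, then counters). */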
static void nmi_restore_registers(struct op_msrs *msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrls; ++i) {
                if (controls[i].addr) {
                        wrmsr(controls[i].addr,
                                controls[i].saved.low,
                                controls[i].saved.high);
                }
        }

        for (i = 0; i < nr_ctrs; ++i) {
                if (counters[i].addr) {
                        wrmsr(counters[i].addr,
                                counters[i].saved.low,
                                counters[i].saved.high);
                }
        }
}

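/* Per-CPU teardown: restore the saved LVTPC entry and the original MSR
 * contents. */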
static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

        /* restoring APIC_LVTPC can trigger an apic error because the delivery
         * mode and vector nr combination can be illegal. That's by design: on
         * power-on the apic lvt entries contain a zero vector nr, which is
         * legal only for NMI delivery mode. So inhibit apic err before
         * restoring lvtpc
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_restore_registers(msrs);
}

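/* Global teardown: stop profiling on every CPU, drop the die notifier, let
 * the model clean up, and release the shadow arrays. */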
static void nmi_shutdown(void)
{
        struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
        nmi_enabled = 0;
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->start(msrs);
}

static int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->stop(msrs);
}

static void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

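/* Create one oprofilefs directory per hardware counter ("0", "1", ...) with
 * its configuration files; counters that cannot be reserved are skipped. */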
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                struct dentry *dir;
                char buf[4];

                /* quick little hack to _not_ expose a counter if it is not
                 * available for use.  This should protect the userspace app.
                 * NOTE:  assumes 1:1 mapping here (that counters are organized
                 *        sequentially in their struct assignment).
                 */
                if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
                        continue;

                snprintf(buf, sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}

static int p4force;
module_param(p4force, int, 0);

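/* Select the P4 model, picking the HyperThreading variant by sibling count;
 * more than two siblings is unsupported, so oprofile reverts to timer mode. */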
static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (!p4force && (cpu_model > 6 || cpu_model == 5))
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

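/* Map the P6-family model number to an oprofile cpu_type string; unknown
 * models are rejected. */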
static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
                *cpu_type = "i386/piii";
                break;
        case 9:
                *cpu_type = "i386/p6_mobile";
                break;
        case 10 ... 13:
                *cpu_type = "i386/p6";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15: case 23:
                *cpu_type = "i386/core_2";
                break;
        case 26:
                *cpu_type = "i386/core_2";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = &op_ppro_spec;
        return 1;
}

/* in order to get sysfs right */
static int using_nmi;

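/*
 * Entry point: requires a local APIC, selects a counter model from the CPU
 * vendor and family, and wires the nmi_* callbacks into oprofile_operations;
 * init_sysfs() then adds the suspend/resume hooks when CONFIG_PM is set.
 */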
int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                default:
                        return -ENODEV;
                case 6:
                        model = &op_amd_spec;
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        model = &op_amd_spec;
                        /* Actually it could be i386/hammer too, but give
                           user space a consistent name. */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        model = &op_amd_spec;
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        model = &op_amd_spec;
                        cpu_type = "x86-64/family11h";
                        break;
                }
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                        /* Pentium IV */
                case 0xf:
                        if (!p4_init(&cpu_type))
                                return -ENODEV;
                        break;

                        /* A P6-class processor */
                case 6:
                        if (!ppro_init(&cpu_type))
                                return -ENODEV;
                        break;

                default:
                        return -ENODEV;
                }
                break;

        default:
                return -ENODEV;
        }

        /* default values, can be overwritten by model */
        ops->create_files = nmi_create_files;
        ops->setup = nmi_setup;
        ops->shutdown = nmi_shutdown;
        ops->start = nmi_start;
        ops->stop = nmi_stop;
        ops->cpu_type = cpu_type;

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

void op_nmi_exit(void)
{
        if (using_nmi)
                exit_sysfs();
        if (model->exit)
                model->exit();
}