/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

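/*
 * Build the raw event select value that is programmed into a counter
 * control MSR: bits 7:0 hold the low event select bits, bits 15:8 the
 * unit mask, bit 16 enables user-mode counting, bit 17 kernel-mode
 * counting and bit 20 the interrupt on overflow
 * (ARCH_PERFMON_EVENTSEL_INT).  Models with an event mask wider than
 * 8 bits (e.g. AMD) carry event bits 11:8 in MSR bits 35:32.
 */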
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}

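/*
 * NMI handler, hooked into the die notifier chain.  While the counters
 * are running, hand the NMI to the model so it can check for counter
 * overflow and record samples; if profiling is enabled but stopped,
 * silence the counter that fired so it cannot raise further NMIs.  In
 * both cases the NMI is consumed (NOTIFY_STOP); if profiling is
 * disabled entirely the NMI is not ours and is passed on.
 */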
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                if (ctr_running)
                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
                else if (!nmi_enabled)
                        break;
                else
                        model->stop(&__get_cpu_var(cpu_msrs));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

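/*
 * Starting and stopping runs on every online CPU via on_each_cpu();
 * ctr_running is only flipped between get_online_cpus() and
 * put_online_cpus(), as required by the locking rule at the top of
 * this file.
 */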
static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->start(msrs);
}

static int nmi_start(void)
{
        get_online_cpus();
        on_each_cpu(nmi_cpu_start, NULL, 1);
        ctr_running = 1;
        put_online_cpus();
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->stop(msrs);
}

static void nmi_stop(void)
{
        get_online_cpus();
        on_each_cpu(nmi_cpu_stop, NULL, 1);
        ctr_running = 0;
        put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

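/*
 * Event multiplexing: the model may expose more virtual counters
 * (num_virt_counters) than physical ones (num_counters).  Each CPU
 * keeps a switch_index marking the first virtual counter currently
 * mapped onto the physical set; nmi_switch_event() rotates that window
 * so all configured events get their share of counter time.
 */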
static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
        return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
        return __get_cpu_var(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
        return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
        int i;

        if (!has_mux())
                return;

        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
                per_cpu(switch_index, i) = 0;
        }
}

static int nmi_setup_mux(void)
{
        size_t multiplex_size =
                sizeof(struct op_msr) * model->num_virt_counters;
        int i;

        if (!has_mux())
                return 1;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).multiplex =
                        kzalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex)
                        return 0;
        }

        return 1;
}

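/*
 * Preset each enabled virtual counter to -count: hardware counters
 * count upwards and raise an NMI on overflow, so seeding them with the
 * negated period makes the NMI fire after exactly `count' events.
 */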
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        if (!has_mux())
                return;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].saved = 0;
                }
        }

        per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

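/*
 * Rotate this CPU to its next set of virtual counters: stop the
 * physical counters, park their current values in the multiplex
 * buffer, advance switch_index (wrapping past the last configured
 * counter), reprogram the controls and reload the new set.
 */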
static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set */
        si += model->num_counters;
        if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!has_mux())
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        get_online_cpus();
        if (ctr_running)
                on_each_cpu(nmi_cpu_switch, NULL, 1);
        put_online_cpus();

        return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
        if (has_mux())
                ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
        if (!has_mux())
                return;

        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
               per_cpu(cpu_msrs, 0).multiplex,
               sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
        }
        nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters)
                        goto fail;
                per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls)
                        goto fail;
        }

        if (!nmi_setup_mux())
                goto fail;

        return 1;

fail:
        free_msrs();
        return 0;
}

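/*
 * Per-CPU setup, run via on_each_cpu(): save the current MSR contents,
 * program the counters under oprofilefs_lock, then reroute the local
 * APIC performance counter interrupt (LVTPC) to NMI delivery, keeping
 * the old LVTPC value around for nmi_cpu_shutdown().
 */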
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = 2
};

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /* restoring APIC_LVTPC can trigger an apic error because the delivery
         * mode and vector nr combination can be illegal. That's by design: on
         * power on the apic lvt contains a zero vector nr, which is legal only
         * for NMI delivery mode. So inhibit apic errors before restoring lvtpc
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
}

static void nmi_cpu_up(void *dummy)
{
        if (nmi_enabled)
                nmi_cpu_setup(dummy);
        if (ctr_running)
                nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
        if (ctr_running)
                nmi_cpu_stop(dummy);
        if (nmi_enabled)
                nmi_cpu_shutdown(dummy);
}

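/*
 * Populate oprofilefs (typically mounted at /dev/oprofile) with one
 * directory per virtual counter, each holding the enabled, event,
 * count, unit_mask, kernel and user control files that the userspace
 * daemon writes before starting a profiling run.
 */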
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

                /* quick little hack to _not_ expose a counter if it is not
                 * available for use.  This should protect the userspace app.
                 * NOTE:  assumes 1:1 mapping here (that counters are organized
                 *        sequentially in their struct assignment).
                 */
                if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
                        continue;

                snprintf(buf,  sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}

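/*
 * CPU hotplug callback: bring profiling up on a CPU coming online and
 * tear it down before one goes offline.  The down path waits for the
 * IPI to finish (wait == 1) so the counters are quiescent before the
 * CPU disappears; the up path does not need to.
 */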
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};

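/*
 * Global setup: allocate the per-CPU MSR arrays, let the model resolve
 * the MSR addresses once on CPU 0, copy them to every other CPU
 * (assuming an identical counter layout everywhere), then register the
 * die notifier before any hardware is programmed.
 */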
static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        /* We need to serialize save and setup for HT because the subsets
         * of msrs are distinct for the save and setup operations
         */

        /* Assume saved/restored counters are the same on all CPUs */
        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        if (err)
                goto fail;

        for_each_possible_cpu(cpu) {
                if (!cpu)
                        continue;

                memcpy(per_cpu(cpu_msrs, cpu).counters,
                       per_cpu(cpu_msrs, 0).counters,
                       sizeof(struct op_msr) * model->num_counters);

                memcpy(per_cpu(cpu_msrs, cpu).controls,
                       per_cpu(cpu_msrs, 0).controls,
                       sizeof(struct op_msr) * model->num_controls);

                mux_clone(cpu);
        }

        nmi_enabled = 0;
        ctr_running = 0;
        barrier();
        err = register_die_notifier(&profile_exceptions_nb);
        if (err)
                goto fail;

        get_online_cpus();
        register_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        put_online_cpus();

        return 0;
fail:
        free_msrs();
        return err;
}

static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        get_online_cpus();
        unregister_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        nmi_enabled = 0;
        ctr_running = 0;
        put_online_cpus();
        barrier();
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (!error)
                error = sysdev_register(&device_oprofile);
        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec *spec = &op_ppro_spec; /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15: case 23:
                *cpu_type = "i386/core_2";
                break;
        case 0x2e:
        case 26:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 28:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

/* in order to get sysfs right */
static int using_nmi;

int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                        /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                        /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

        /* default values, can be overwritten by model */
        ops->create_files       = nmi_create_files;
        ops->setup              = nmi_setup;
        ops->shutdown           = nmi_shutdown;
        ops->start              = nmi_start;
        ops->stop               = nmi_stop;
        ops->cpu_type           = cpu_type;

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        if (!model->num_virt_counters)
                model->num_virt_counters = model->num_counters;

        mux_init(ops);

        init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

void op_nmi_exit(void)
{
        if (using_nmi)
                exit_sysfs();
        if (model->exit)
                model->exit();
}