/**
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"
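
/*
 * Per-CPU profiling state. When more events are configured than the
 * hardware can count at once, switch_index tracks which block of
 * counter_config entries a CPU is currently counting (multiplexing).
 */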
DEFINE_PER_CPU(int, switch_index);

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

static int nmi_start(void);
static void nmi_stop(void);
static void nmi_cpu_start(void *dummy);
static void nmi_cpu_stop(void *dummy);
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
#endif

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */
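
/*
 * Rotate this CPU to its next block of configured events: stop the
 * counters, park the current hardware counter values, advance
 * switch_index (wrapping back to the first block when the next one
 * is past the end or unused), reload the parked values for the new
 * block, reprogram the controls and restart.
 */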
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_hardware_counters;
	if ((si >= model->num_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	nmi_cpu_restore_mpx_registers(msrs);
	model->setup_ctrs(msrs);
	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (nmi_multiplex_on() < 0)
		return -EINVAL;

	on_each_cpu(nmi_cpu_switch, NULL, 1);

	return 0;
}

static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			rdmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}
}

static void nmi_save_registers(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
}

static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}

static int allocate_msrs(void)
{
	int success = 1;
	int i;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
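
/*
 * Per-CPU setup: program the counter controls while holding
 * oprofilefs_lock (counter settings arrive through oprofilefs), then
 * point the local APIC performance-counter LVT entry at NMI delivery
 * so counter overflows arrive as NMIs. The old LVTPC value is saved
 * so nmi_cpu_shutdown() can restore it.
 */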
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 0
};
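
/*
 * Global setup: allocate the per-CPU MSR shadow arrays, hook the die
 * notifier for NMIs, fill in the MSR addresses once on CPU 0, copy
 * them to every other CPU, then save the current MSR contents and
 * program the counters on all CPUs.
 */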
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	err = register_die_notifier(&profile_exceptions_nb);
	if (err) {
		free_msrs();
		return err;
	}

	/*
	 * We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(per_cpu(cpu_msrs, cpu).counters,
			       per_cpu(cpu_msrs, 0).counters,
			       sizeof(struct op_msr) * model->num_counters);

			memcpy(per_cpu(cpu_msrs, cpu).controls,
			       per_cpu(cpu_msrs, 0).controls,
			       sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
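
/*
 * The save/restore pair below spills and reloads only the block of
 * hardware counters selected by switch_index, so switching event sets
 * does not disturb the values parked for the other sets.
 */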
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	unsigned int si = __get_cpu_var(switch_index);
	unsigned int const nr_ctrs = model->num_hardware_counters;
	struct op_msr *counters = &msrs->counters[si];
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
			      counters[i].multiplex.low,
			      counters[i].multiplex.high);
		}
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	unsigned int si = __get_cpu_var(switch_index);
	unsigned int const nr_ctrs = model->num_hardware_counters;
	struct op_msr *counters = &msrs->counters[si];
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
			      counters[i].multiplex.low,
			      counters[i].multiplex.high);
		}
	}
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			wrmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

	/*
	 * Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector nr combination can be illegal. That's
	 * by design: on power on, the APIC LVT entries contain a zero
	 * vector nr, which is legal only for NMI delivery mode. So inhibit
	 * APIC errors while restoring LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
	__get_cpu_var(switch_index) = 0;
}
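
/*
 * Tear-down mirrors nmi_setup() in reverse: quiesce every CPU, unhook
 * the die notifier, let the model clean up and free the MSR shadow
 * arrays.
 */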
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];
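
/*
 * Build the oprofilefs tree (typically mounted at /dev/oprofile): one
 * numbered directory per counter, each exposing the enabled, event,
 * count, unit_mask, kernel and user settings as ulong files.
 */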
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/*
		 * quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
		counter_config[i].save_count_low = 0;
	}

	return 0;
}

static int p4force;
module_param(p4force, int, 0);
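
/*
 * P4 detection: unknown or too-new models fall back to timer mode
 * unless p4force is set. On SMP kernels the number of HT siblings
 * selects between the plain P4 model and the 2-thread HT variant.
 */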
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (!p4force && (cpu_model > 6 || cpu_model == 5))
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
		*cpu_type = "i386/piii";
		break;
	case 9:
		*cpu_type = "i386/p6_mobile";
		break;
	case 10 ... 13:
		*cpu_type = "i386/p6";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		*cpu_type = "i386/core_2";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = &op_ppro_spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;
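
/*
 * Entry point: select a counter model and cpu_type string from the CPU
 * vendor, family and model, then wire the NMI callbacks into the
 * oprofile_operations. The model's init hook may override the defaults
 * set here.
 */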
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
		switch (family) {
		default:
			return -ENODEV;
		case 6:
			model = &op_amd_spec;
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			model = &op_amd_spec;
			/* Actually it could be i386/hammer too, but
			   give user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			model = &op_amd_spec;
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			model = &op_amd_spec;
			cpu_type = "x86-64/family11h";
			break;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			if (!p4_init(&cpu_type))
				return -ENODEV;
			break;

			/* A P6-class processor */
		case 6:
			if (!ppro_init(&cpu_type))
				return -ENODEV;
			break;

		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}

#ifdef CONFIG_SMP
	register_cpu_notifier(&oprofile_cpu_nb);
#endif
	/* default values, can be overwritten by model */
	__raw_get_cpu_var(switch_index) = 0;
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;
	ops->switch_events = nmi_switch_event;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
#ifdef CONFIG_SMP
		unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
	}
}