Merge branch 'x86/oprofile' into oprofile
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2b6ad5b9f9d53f0012629f5c255f3d12e6d87f82..fb4902bc6f147e1bf35d143862502fe8553a780f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
 /**
  * @file nmi_int.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2008 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #include <linux/init.h>
 #include "op_counter.h"
 #include "op_x86_model.h"
 
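+/* Per-CPU offset of the counter set that is currently programmed. */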
+DEFINE_PER_CPU(int, switch_index);
+
 static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 static int nmi_start(void);
 static void nmi_stop(void);
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);
+static void nmi_cpu_stop(void *dummy);
+static void nmi_cpu_start(void *dummy);
 
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
@@ -80,6 +87,47 @@ static void exit_sysfs(void)
 #define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
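+/*
+ * Per-CPU event switch: stop counting, save the values of the active
+ * counter set, advance switch_index to the next configured set, restore
+ * that set's saved values, then reprogram and restart the counters.
+ */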
+static void nmi_cpu_switch(void *dummy)
+{
+       int cpu = smp_processor_id();
+       int si = per_cpu(switch_index, cpu);
+       struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
+       nmi_cpu_stop(NULL);
+       nmi_cpu_save_mpx_registers(msrs);
+
+       /* move to next set */
+       si += model->num_hardware_counters;
+       if ((si > model->num_counters) || (counter_config[si].count == 0))
+               per_cpu(switch_index, smp_processor_id()) = 0;
+       else
+               per_cpu(switch_index, smp_processor_id()) = si;
+
+       nmi_cpu_restore_mpx_registers(msrs);
+       model->setup_ctrs(msrs);
+       nmi_cpu_start(NULL);
+}
+
+/*
+ * Quick check to see if multiplexing is necessary.
+ * The check should be sufficient since counters are used
+ * in order.
+ */
+static int nmi_multiplex_on(void)
+{
+       return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
+}
+
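+/*
+ * Rotate every CPU to its next counter set.  Returns -EINVAL when
+ * there is nothing to multiplex.
+ */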
+static int nmi_switch_event(void)
+{
+       if (nmi_multiplex_on() < 0)
+               return -EINVAL;
+
+       on_each_cpu(nmi_cpu_switch, NULL, 1);
+
+       return 0;
+}
+
 static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -143,11 +191,10 @@ static void free_msrs(void)
 
 static int allocate_msrs(void)
 {
-       int success = 1;
+       int i, success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
-       int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                                GFP_KERNEL);
@@ -155,8 +202,8 @@ static int allocate_msrs(void)
                        success = 0;
                        break;
                }
-               per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
-                                                               GFP_KERNEL);
+               per_cpu(cpu_msrs, i).controls =
+                               kmalloc(controls_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
@@ -200,7 +247,8 @@ static int nmi_setup(void)
                return err;
        }
 
-       /* We need to serialize save and setup for HT because the subset
+       /*
+        * We need to serialize save and setup for HT because the subset
         * of msrs are distinct for save and setup operations
         */
 
@@ -216,15 +264,48 @@ static int nmi_setup(void)
                                per_cpu(cpu_msrs, 0).controls,
                                sizeof(struct op_msr) * model->num_controls);
                }
-
        }
-       on_each_cpu(nmi_save_registers, NULL, 0, 1);
-       on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+       on_each_cpu(nmi_save_registers, NULL, 1);
+       on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        return 0;
 }
 
-static void nmi_restore_registers(struct op_msrs *msrs)
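+/*
+ * Save the current values of the active counter set into the multiplex
+ * slots so that no counts are lost when the events are switched.
+ */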
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+       unsigned int si = __get_cpu_var(switch_index);
+       unsigned int const nr_ctrs = model->num_hardware_counters;
+       struct op_msr *counters = &msrs->counters[si];
+       unsigned int i;
+
+       for (i = 0; i < nr_ctrs; ++i) {
+               int offset = i + si;
+               if (counters[offset].addr) {
+                       rdmsr(counters[offset].addr,
+                               counters[offset].multiplex.low,
+                               counters[offset].multiplex.high);
+               }
+       }
+}
+
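+/* Reload the previously saved values of the (now) active counter set. */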
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+       unsigned int si = __get_cpu_var(switch_index);
+       unsigned int const nr_ctrs = model->num_hardware_counters;
+       struct op_msr *counters = &msrs->counters[si];
+       unsigned int i;
+
+       for (i = 0; i < nr_ctrs; ++i) {
+               int offset = i + si;
+               if (counters[offset].addr) {
+                       wrmsr(counters[offset].addr,
+                               counters[offset].multiplex.low,
+                               counters[offset].multiplex.high);
+               }
+       }
+}
+
+static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
@@ -264,14 +345,15 @@ static void nmi_cpu_shutdown(void *dummy)
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
-       nmi_restore_registers(msrs);
+       nmi_cpu_restore_registers(msrs);
+       __get_cpu_var(switch_index) = 0;
 }
 
 static void nmi_shutdown(void)
 {
        struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
        nmi_enabled = 0;
-       on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        model->shutdown(msrs);
        free_msrs();
@@ -286,7 +368,7 @@ static void nmi_cpu_start(void *dummy)
 
 static int nmi_start(void)
 {
-       on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
 }
 
@@ -298,7 +380,7 @@ static void nmi_cpu_stop(void *dummy)
 
 static void nmi_stop(void)
 {
-       on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
@@ -327,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+               counter_config[i].save_count_low = 0;
        }
 
        return 0;
@@ -369,20 +452,34 @@ static int __init ppro_init(char **cpu_type)
 {
        __u8 cpu_model = boot_cpu_data.x86_model;
 
-       if (cpu_model == 14)
+       switch (cpu_model) {
+       case 0 ... 2:
+               *cpu_type = "i386/ppro";
+               break;
+       case 3 ... 5:
+               *cpu_type = "i386/pii";
+               break;
+       case 6 ... 8:
+               *cpu_type = "i386/piii";
+               break;
+       case 9:
+               *cpu_type = "i386/p6_mobile";
+               break;
+       case 10 ... 13:
+               *cpu_type = "i386/p6";
+               break;
+       case 14:
                *cpu_type = "i386/core";
-       else if (cpu_model == 15 || cpu_model == 23)
+               break;
+       case 15: case 23:
+               *cpu_type = "i386/core_2";
+               break;
+       case 26:
                *cpu_type = "i386/core_2";
-       else if (cpu_model > 0xd)
+               break;
+       default:
+               /* Unknown */
                return 0;
-       else if (cpu_model == 9) {
-               *cpu_type = "i386/p6_mobile";
-       } else if (cpu_model > 5) {
-               *cpu_type = "i386/piii";
-       } else if (cpu_model > 2) {
-               *cpu_type = "i386/pii";
-       } else {
-               *cpu_type = "i386/ppro";
        }
 
        model = &op_ppro_spec;
@@ -397,6 +494,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type;
+       int ret = 0;
 
        if (!cpu_has_apic)
                return -ENODEV;
@@ -409,19 +507,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                default:
                        return -ENODEV;
                case 6:
-                       model = &op_athlon_spec;
+                       model = &op_amd_spec;
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
-                       model = &op_athlon_spec;
+                       model = &op_amd_spec;
                        /* Actually it could be i386/hammer too, but give
                           user space a consistent name. */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
-                       model = &op_athlon_spec;
+                       model = &op_amd_spec;
                        cpu_type = "x86-64/family10";
                        break;
+               case 0x11:
+                       model = &op_amd_spec;
+                       cpu_type = "x86-64/family11h";
+                       break;
                }
                break;
 
@@ -448,14 +550,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                return -ENODEV;
        }
 
-       init_sysfs();
-       using_nmi = 1;
+       /* default values, can be overwritten by model */
+       __get_cpu_var(switch_index) = 0;
        ops->create_files = nmi_create_files;
        ops->setup = nmi_setup;
        ops->shutdown = nmi_shutdown;
        ops->start = nmi_start;
        ops->stop = nmi_stop;
        ops->cpu_type = cpu_type;
+       ops->switch_events = nmi_switch_event;
+
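+       /* Give the model a chance to adjust the default operations. */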
+       if (model->init)
+               ret = model->init(ops);
+       if (ret)
+               return ret;
+
+       init_sysfs();
+       using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
 }
@@ -464,4 +575,6 @@ void op_nmi_exit(void)
 {
        if (using_nmi)
                exit_sysfs();
+       if (model->exit)
+               model->exit();
 }