git.karo-electronics.de Git - mv-sheeva.git/blobdiff - arch/x86/oprofile/nmi_int.c
Merge commit 'v2.6.27-rc8' into oprofile
[mv-sheeva.git] / arch / x86 / oprofile / nmi_int.c
index b29819313f2b207b88d2535eb6d36432fbfdb871..114df508b407b78884a1c98e653a171b76f17d7c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/moduleparam.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include "op_counter.h"
 #include "op_x86_model.h"
 
+DEFINE_PER_CPU(int, switch_index);
+
 static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 static int nmi_start(void);
 static void nmi_stop(void);
+static void nmi_cpu_start(void *dummy);
+static void nmi_cpu_stop(void *dummy);
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs);
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs);
 
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
#ifdef CONFIG_SMP
/*
 * CPU hotplug callback: keep per-CPU NMI profiling consistent as CPUs
 * come and go.  A CPU going down is stopped synchronously before it
 * disappears; a CPU coming online (or whose offlining failed) has its
 * counters started again, fire-and-forget.
 */
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	if (action == CPU_DOWN_PREPARE) {
		/* wait == 1: the CPU must be stopped before it goes away */
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
	} else if (action == CPU_ONLINE || action == CPU_DOWN_FAILED) {
		/* wait == 0: no need to block until counters restart */
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
	}

	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
#endif
+
 #ifdef CONFIG_PM
 
/*
 * Suspend hook: by the time we get here only one CPU is still running,
 * so stopping the local counters is sufficient.
 */
static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	if (nmi_enabled != 1)
		return 0;

	/* Only one CPU left, just stop that one */
	nmi_cpu_stop(NULL);
	return 0;
}
 
/*
 * Resume hook: restart the counters on the (single) running CPU if
 * profiling was enabled when we suspended.
 */
static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled != 1)
		return 0;

	nmi_cpu_start(NULL);
	return 0;
}
 
@@ -81,6 +111,47 @@ static void exit_sysfs(void)
 #define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
+static void nmi_cpu_switch(void *dummy)
+{
+       int cpu = smp_processor_id();
+       int si = per_cpu(switch_index, cpu);
+       struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
+       nmi_cpu_stop(NULL);
+       nmi_cpu_save_mpx_registers(msrs);
+
+       /* move to next set */
+       si += model->num_hardware_counters;
+       if ((si > model->num_counters) || (counter_config[si].count == 0))
+               per_cpu(switch_index, smp_processor_id()) = 0;
+       else
+               per_cpu(switch_index, smp_processor_id()) = si;
+
+       nmi_cpu_restore_mpx_registers(msrs);
+       model->setup_ctrs(msrs);
+       nmi_cpu_start(NULL);
+}
+
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order: if the first counter slot beyond the hardware
 * counters is configured, more than one counter set exists.
 *
 * Returns 0 when multiplexing is in use, -EINVAL otherwise.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL;
}
+
+static int nmi_switch_event(void)
+{
+       if (nmi_multiplex_on() < 0)
+               return -EINVAL;
+
+       on_each_cpu(nmi_cpu_switch, NULL, 1);
+
+       return 0;
+}
+
 static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -144,11 +215,10 @@ static void free_msrs(void)
 
 static int allocate_msrs(void)
 {
-       int success = 1;
+       int i, success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
-       int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                                GFP_KERNEL);
@@ -156,8 +226,8 @@ static int allocate_msrs(void)
                        success = 0;
                        break;
                }
-               per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
-                                                               GFP_KERNEL);
+               per_cpu(cpu_msrs, i).controls =
+                               kmalloc(controls_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
@@ -201,7 +271,8 @@ static int nmi_setup(void)
                return err;
        }
 
-       /* We need to serialize save and setup for HT because the subset
+       /*
+        * We need to serialize save and setup for HT because the subset
         * of msrs are distinct for save and setup operations
         */
 
@@ -217,7 +288,6 @@ static int nmi_setup(void)
                                per_cpu(cpu_msrs, 0).controls,
                                sizeof(struct op_msr) * model->num_controls);
                }
-
        }
        on_each_cpu(nmi_save_registers, NULL, 1);
        on_each_cpu(nmi_cpu_setup, NULL, 1);
@@ -225,7 +295,41 @@ static int nmi_setup(void)
        return 0;
 }
 
-static void nmi_restore_registers(struct op_msrs *msrs)
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+       unsigned int si = __get_cpu_var(switch_index);
+       unsigned int const nr_ctrs = model->num_hardware_counters;
+       struct op_msr *counters = &msrs->counters[si];
+       unsigned int i;
+
+       for (i = 0; i < nr_ctrs; ++i) {
+               int offset = i + si;
+               if (counters[offset].addr) {
+                       rdmsr(counters[offset].addr,
+                               counters[offset].multiplex.low,
+                               counters[offset].multiplex.high);
+               }
+       }
+}
+
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+       unsigned int si = __get_cpu_var(switch_index);
+       unsigned int const nr_ctrs = model->num_hardware_counters;
+       struct op_msr *counters = &msrs->counters[si];
+       unsigned int i;
+
+       for (i = 0; i < nr_ctrs; ++i) {
+               int offset = i + si;
+               if (counters[offset].addr) {
+                       wrmsr(counters[offset].addr,
+                               counters[offset].multiplex.low,
+                               counters[offset].multiplex.high);
+               }
+       }
+}
+
+static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
@@ -265,15 +369,18 @@ static void nmi_cpu_shutdown(void *dummy)
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
-       nmi_restore_registers(msrs);
+       nmi_cpu_restore_registers(msrs);
+       __get_cpu_var(switch_index) = 0;
 }
 
/*
 * Tear down NMI profiling on all CPUs.
 *
 * Ordering matters here: nmi_enabled is cleared first so a late NMI is
 * ignored, the per-CPU shutdown runs via IPI, and only afterwards is
 * the die notifier unregistered and the MSR memory freed.
 *
 * NOTE(review): get_cpu_var() disables preemption until put_cpu_var(),
 * so model->shutdown() and free_msrs() run in a non-preemptible
 * window — confirm neither may sleep.
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
@@ -328,6 +435,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+               counter_config[i].save_count_low = 0;
        }
 
        return 0;
@@ -468,6 +576,19 @@ int __init op_nmi_init(struct oprofile_operations *ops)
                return -ENODEV;
        }
 
+#ifdef CONFIG_SMP
+       register_cpu_notifier(&oprofile_cpu_nb);
+#endif
+       /* default values, can be overwritten by model */
+       __raw_get_cpu_var(switch_index) = 0;
+       ops->create_files = nmi_create_files;
+       ops->setup = nmi_setup;
+       ops->shutdown = nmi_shutdown;
+       ops->start = nmi_start;
+       ops->stop = nmi_stop;
+       ops->cpu_type = cpu_type;
+       ops->switch_events = nmi_switch_event;
+
        if (model->init)
                ret = model->init(ops);
        if (ret)
@@ -475,20 +596,18 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        init_sysfs();
        using_nmi = 1;
-       ops->create_files = nmi_create_files;
-       ops->setup = nmi_setup;
-       ops->shutdown = nmi_shutdown;
-       ops->start = nmi_start;
-       ops->stop = nmi_stop;
-       ops->cpu_type = cpu_type;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
 }
 
 void op_nmi_exit(void)
 {
-       if (using_nmi)
+       if (using_nmi) {
                exit_sysfs();
+#ifdef CONFIG_SMP
+               unregister_cpu_notifier(&oprofile_cpu_nb);
+#endif
        if (model->exit)
                model->exit();
+       }
 }