Merge branch 'x86/uv'
[karo-tx-linux.git] / arch/x86/kernel/apic/x2apic_uv_x.c
index a419814cea575f9e2cf183772a9e99c59e097a11..ad0dc0428baf8cc4550f339f78e4552ef40020fb 100644 (file)
 #include <asm/x86_init.h>
 #include <asm/nmi.h>
 
-/* BMC sets a bit in this MMR non-zero before sending an NMI */
-#define UVH_NMI_MMR                            UVH_SCRATCH5
-#define UVH_NMI_MMR_CLEAR                      (UVH_NMI_MMR + 8)
-#define UV_NMI_PENDING_MASK                    (1UL << 63)
-DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
-
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 #define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)
@@ -58,7 +52,6 @@ int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
-static DEFINE_SPINLOCK(uv_nmi_lock);
 
 static struct apic apic_x2apic_uv_x;
 
@@ -847,68 +840,6 @@ void uv_cpu_init(void)
                set_x2apic_extra_bits(uv_hub_info->pnode);
 }
 
-/*
- * When an NMI is received, print a stack trace.
- */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
-{
-       unsigned long real_uv_nmi;
-       int bid;
-
-       /*
-        * Each blade has an MMR that indicates when an NMI has been sent
-        * to cpus on the blade. If an NMI is detected, atomically
-        * clear the MMR and update a per-blade NMI count used to
-        * cause each cpu on the blade to notice a new NMI.
-        */
-       bid = uv_numa_blade_id();
-       real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
-
-       if (unlikely(real_uv_nmi)) {
-               spin_lock(&uv_blade_info[bid].nmi_lock);
-               real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
-               if (real_uv_nmi) {
-                       uv_blade_info[bid].nmi_count++;
-                       uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
-               }
-               spin_unlock(&uv_blade_info[bid].nmi_lock);
-       }
-
-       if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
-               return NMI_DONE;
-
-       __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
-
-       /*
-        * Use a lock so only one cpu prints at a time.
-        * This prevents intermixed output.
-        */
-       spin_lock(&uv_nmi_lock);
-       pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
-       dump_stack();
-       spin_unlock(&uv_nmi_lock);
-
-       return NMI_HANDLED;
-}
-
-void uv_register_nmi_notifier(void)
-{
-       if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
-               printk(KERN_WARNING "UV NMI handler failed to register\n");
-}
-
-void uv_nmi_init(void)
-{
-       unsigned int value;
-
-       /*
-        * Unmask NMI on all cpus
-        */
-       value = apic_read(APIC_LVT1) | APIC_DM_NMI;
-       value &= ~APIC_LVT_MASKED;
-       apic_write(APIC_LVT1, value);
-}
-
 void __init uv_system_init(void)
 {
        union uvh_rh_gam_config_mmr_u  m_n_config;
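
The handler removed in the hunk above follows the generic x86 NMI callback contract: it is registered on the NMI_UNKNOWN chain, returns NMI_DONE when the NMI is not its own, and NMI_HANDLED once the event has been claimed and processed. Below is a minimal, self-contained sketch of that contract only; example_nmi_handler, example_nmi_is_mine and the "example" registration name are hypothetical stand-ins, not kernel symbols.

#include <linux/init.h>		/* __init */
#include <linux/kernel.h>	/* pr_info(), pr_warn(), dump_stack() */
#include <linux/smp.h>		/* smp_processor_id() */
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/nmi.h>		/* register_nmi_handler(), NMI_UNKNOWN, NMI_DONE, NMI_HANDLED */

/* Hypothetical platform check, e.g. a pending bit read from an MMR. */
static bool example_nmi_is_mine(void)
{
	return false;
}

static int example_nmi_handler(unsigned int reason, struct pt_regs *regs)
{
	if (!example_nmi_is_mine())
		return NMI_DONE;	/* not ours: let other handlers look at it */

	pr_info("example NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	return NMI_HANDLED;		/* claimed: stop walking the chain */
}

static void __init example_register_nmi(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, example_nmi_handler, 0, "example"))
		pr_warn("example NMI handler failed to register\n");
}

register_nmi_handler() hooks the unknown-NMI chain here, the same chain the removed uv_register_nmi_notifier() used; per-type chains such as NMI_LOCAL exist as well.
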
@@ -1046,6 +977,7 @@ void __init uv_system_init(void)
        map_mmr_high(max_pnode);
        map_mmioh_high(min_pnode, max_pnode);
 
+       uv_nmi_setup();
        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        uv_register_nmi_notifier();
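
The only addition to this file is the uv_nmi_setup() call; the NMI bookkeeping removed above moves out of x2apic_uv_x.c, while handler registration is still entered through uv_register_nmi_notifier() on the following line. For reference, the LVT1 unmask that the removed uv_nmi_init() performed is a plain read-modify-write of the local APIC's LVT1 entry; a commented sketch, with example_lvt1_unmask() as a hypothetical name:

#include <asm/apic.h>	/* apic_read(), apic_write(), APIC_LVT1, APIC_DM_NMI, APIC_LVT_MASKED */

/* Hypothetical helper: enable NMI delivery through LVT1 on the calling cpu. */
static void example_lvt1_unmask(void)
{
	unsigned int value;

	value  = apic_read(APIC_LVT1);	/* current LVT1 entry                  */
	value |= APIC_DM_NMI;		/* delivery mode: NMI                  */
	value &= ~APIC_LVT_MASKED;	/* clear the mask bit                  */
	apic_write(APIC_LVT1, value);	/* affects only this cpu's local APIC  */
}

Because apic_write() touches only the local APIC of the executing cpu, the removed code's "Unmask NMI on all cpus" comment implies this step runs once per cpu.
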