/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/emergency-restart.h>
#include <asm/nmi.h>

/* The BMC sets a bit in this MMR before sending an NMI */
#define UVH_NMI_MMR				UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR			(UVH_NMI_MMR + 8)
#define UV_NMI_PENDING_MASK			(1UL << 63)
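/*
 * Illustrative handshake, exactly as consumed by uv_handle_nmi()
 * below (no extra hardware behavior is assumed): the BMC sets bit 63
 * of UVH_SCRATCH5 and raises the NMI; the handler tests
 *
 *	uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK
 *
 * and acknowledges by writing the same mask to UVH_NMI_MMR_CLEAR.
 */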
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...)	pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
static DEFINE_SPINLOCK(uv_nmi_lock);

static struct apic apic_x2apic_uv_x;

static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
	unsigned long val, *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
	val = *mmr;
	early_iounmap(mmr, sizeof(*mmr));
	return val;
}

static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}

static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	/* Currently, all blades have the same revision number */
	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	switch (node_id.s.part_number) {
	case UV2_HUB_PART_NUMBER:
	case UV2_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
		break;
	case UV3_HUB_PART_NUMBER:
	case UV3_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
		break;
	}

	uv_hub_info->hub_revision = uv_min_hub_revision_id;
	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
	return pnode;
}

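/*
 * Worked example (illustrative values, not from real hardware): with
 * n_skt = 4 and node_id.s.node_id = 0x1a,
 *
 *	pnode = (0x1a >> 1) & ((1 << 4) - 1) = 0xd & 0xf = 0xd
 *
 * The low node_id bit is dropped because node ids count in NASID
 * units, two per pnode; see the "convert to NASID" step in
 * map_mmioh_high_uv3() below.
 */
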
static void __init early_get_apic_pnode_shift(void)
{
	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
	if (!uvh_apicid.v)
		/*
		 * Old bios, use default value
		 */
		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}

/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB. This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
	union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

	if (is_uv1_hub()) {
		apicid_mask.v =
			uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
		uv_apicid_hibits =
			apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
	}
}

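/*
 * The bits saved here are OR'd into every destination apicid this
 * driver emits, for example in uv_wakeup_secondary() and
 * uv_cpu_mask_to_apicid_and() below:
 *
 *	phys_apicid |= uv_apicid_hibits;
 */
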
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int pnodeid, is_uv1, is_uv2, is_uv3;

	is_uv1 = !strcmp(oem_id, "SGI");
	is_uv2 = !strcmp(oem_id, "SGI2");
	is_uv3 = !strncmp(oem_id, "SGI3", 4);	/* there are varieties of UV3 */
	if (is_uv1 || is_uv2 || is_uv3) {
		uv_hub_info->hub_revision =
			(is_uv1 ? UV1_HUB_REVISION_BASE :
			(is_uv2 ? UV2_HUB_REVISION_BASE :
				UV3_HUB_REVISION_BASE));
		pnodeid = early_get_pnodeid();
		early_get_apic_pnode_shift();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		x86_platform.nmi_init = uv_nmi_init;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__this_cpu_write(x2apic_extra_bits,
				pnodeid << uvh_apicid.s.pnode_shift);
			uv_system_type = UV_NON_UNIQUE_APIC;
			uv_set_apicid_hibit();
			return 1;
		}
	}
	return 0;
}

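/*
 * Summary of the detection above (any of the SGI/SGI2/SGI3* oem ids
 * may pair with any table id):
 *
 *	oem_table_id	uv_system_type
 *	"UVL"		UV_LEGACY_APIC
 *	"UVX"		UV_X2APIC
 *	"UVH"		UV_NON_UNIQUE_APIC (plus extra apicid bits)
 */
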
enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	phys_apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);

	return 0;
}

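/*
 * The two UVH_IPI_INT writes above stand in for the classic local
 * APIC INIT/STARTUP sequence: the same destination and vector
 * payload is posted twice, once with APIC_DM_INIT and once with
 * APIC_DM_STARTUP, the startup vector being start_rip >> 12 (the
 * 4K page number of the trampoline).
 */
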
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_valid(int apicid)
{
	return 1;
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask,
			  unsigned int *apicid)
{
	unsigned int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	if (likely(cpu < nr_cpu_ids)) {
		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
		return 0;
	}

	return -EINVAL;
}

static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* mask out x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}

static struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_valid			= uv_apic_id_valid,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= online_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

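/*
 * Resulting x2apic id layout under UV_NON_UNIQUE_APIC (sketch,
 * assuming the bios-provided pnode_shift):
 *
 *	apicid = (pnode << uvh_apicid.s.pnode_shift) | local apic id
 *
 * x2apic_get_apic_id() above re-adds these extra bits to the value
 * read from the APIC_ID register.
 */
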
/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}

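/*
 * Sketch of how the discovered window is consumed (the real users
 * are the lowmem remap helpers in uv_hub.h): a socket address below
 * lowmem_remap_top is redirected to lowmem_remap_base + offset; the
 * per-cpu fields are filled in from these values in uv_system_init().
 */
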
enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	if (!paddr) {
		pr_info("UV: Map %s_HI base address NULL\n", id);
		return;
	}
	pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}

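/*
 * Worked example (illustrative numbers): base = 0x3 with pshift =
 * bshift = 28 and max_pnode = 3 maps
 *
 *	paddr = 0x3UL << 28		= 0x30000000
 *	bytes = (1UL << 28) * (3 + 1)	= 0x40000000
 *
 * i.e. one 256MB window per pnode, uncached or write-back per
 * map_type.
 */
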
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
	} else {
		pr_info("UV: GRU disabled\n");
	}
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
	else
		pr_info("UV: MMR disabled\n");
}

/*
 * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
 * and REDIRECT MMR regs are exactly the same on UV3.
 */
struct mmioh_config {
	unsigned long overlay;
	unsigned long redirect;
	char *id;
};

static __initdata struct mmioh_config mmiohs[] = {
	{
		UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
		UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
		"MMIOH0"
	},
	{
		UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
		UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
		"MMIOH1"
	},
};

static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
{
	union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
	unsigned long mmr;
	unsigned long base;
	int i, n, shift, m_io, max_io;
	int nasid, lnasid, fi, li;
	char *id;

	id = mmiohs[index].id;
	overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
	pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
		id, overlay.v, overlay.s3.base, overlay.s3.m_io);
	if (!overlay.s3.enable) {
		pr_info("UV: %s disabled\n", id);
		return;
	}

	shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
	base = (unsigned long)overlay.s3.base;
	m_io = overlay.s3.m_io;
	mmr = mmiohs[index].redirect;
	n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
	min_pnode *= 2;				/* convert to NASID */
	max_pnode *= 2;
	max_io = lnasid = fi = li = -1;

	for (i = 0; i < n; i++) {
		union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;

		redirect.v = uv_read_local_mmr(mmr + i * 8);
		nasid = redirect.s3.nasid;
		if (nasid < min_pnode || max_pnode < nasid)
			nasid = -1;		/* invalid NASID */

		if (nasid == lnasid) {
			li = i;
			if (i != n-1)		/* last entry check */
				continue;
		}

		/* check if we have a cached (or last) redirect to print */
		if (lnasid != -1 || (i == n-1 && nasid != -1)) {
			unsigned long addr1, addr2;
			int f, l;

			if (lnasid == -1) {
				f = l = i;
				lnasid = nasid;
			} else {
				f = fi;
				l = li;
			}
			addr1 = (base << shift) +
				f * (unsigned long)(1 << m_io);
			addr2 = (base << shift) +
				(l + 1) * (unsigned long)(1 << m_io);
			pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
				id, fi, li, lnasid, addr1, addr2);
			if (max_io < l)
				max_io = l;
		}
		fi = li = i;
		lnasid = nasid;
	}

	pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
		id, base, shift, m_io, max_io);

	if (max_io >= 0)
		map_high(id, base, shift, m_io, max_io, map_uc);
}

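/*
 * The redirect walk above coalesces consecutive table entries that
 * point at the same NASID, so a single pr_info line (and a single
 * max_io update) covers each [fi..li] run rather than logging all
 * UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH entries one by one.
 */
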
static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	unsigned long mmr, base;
	int shift, enable, m_io, n_io;

	if (is_uv3_hub()) {
		/* Map both MMIOH Regions */
		map_mmioh_high_uv3(0, min_pnode, max_pnode);
		map_mmioh_high_uv3(1, min_pnode, max_pnode);
		return;
	}

	if (is_uv1_hub()) {
		mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s1.enable;
		base = mmioh.s1.base;
		m_io = mmioh.s1.m_io;
		n_io = mmioh.s1.n_io;
	} else if (is_uv2_hub()) {
		mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s2.enable;
		base = mmioh.s2.base;
		m_io = mmioh.s2.m_io;
		n_io = mmioh.s2.n_io;
	} else
		return;

	if (enable) {
		max_pnode &= (1 << n_io) - 1;
		pr_info(
		    "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
			base, shift, n_io, m_io, max_pnode);
		map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
	} else {
		pr_info("UV: MMIOH disabled\n");
	}
}

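/*
 * Note on the UV1/UV2 path above: max_pnode is masked with
 * (1 << n_io) - 1 because the overlay window only decodes n_io pnode
 * bits; e.g. n_io = 4 can map at most pnodes 0x0..0xf no matter how
 * many blades exist.
 */
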
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

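/*
 * The fallback above encodes a period guess: 1000000000000UL ps
 * divided by a 30000 ps period gives 33333333 cycles/sec, i.e. an
 * assumed ~33.3 MHz RTC when the bios value is unusable.
 */
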
/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

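/*
 * SCIR state byte as toggled above (one byte per cpu in the system
 * controller interface region):
 *
 *	SCIR_CPU_HEARTBEAT - flipped once per SCIR_CPU_HB_INTERVAL
 *	SCIR_CPU_ACTIVITY  - set while the cpu is not idle
 *
 * presumably letting the system controller spot hung or idle cpus.
 */
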
static void __cpuinit uv_heartbeat_enable(int cpu)
{
	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;

		/* also ensure that boot cpu is enabled */
		cpu = 0;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	int domain, bus, rc;

	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
			pdev->devfn, decode, command_bits, flags);

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	if ((command_bits & PCI_COMMAND_IO) == 0)
		return 0;

	domain = pci_domain_nr(pdev->bus);
	bus = pdev->bus->number;

	rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
	PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

	return rc;
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}

/*
 * When an NMI is received, print a stack trace.
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	unsigned long real_uv_nmi;
	int bid;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
		return NMI_DONE;

	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NMI_HANDLED;
}

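/*
 * Fan-out sketch: one cpu on the blade wins the nmi_lock race, bumps
 * uv_blade_info[bid].nmi_count and clears the MMR; every other cpu
 * on the blade then notices its cpu_last_nmi_count lagging the blade
 * count and dumps its own stack as well.
 */
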
void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		printk(KERN_WARNING "UV NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

void __init uv_system_init(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int gnode_extra, min_pnode = 999999, max_pnode = -1;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask;
	char *hub = (is_uv1_hub() ? "UV1" :
		    (is_uv2_hub() ? "UV2" :
				    "UV3"));

	pr_info("UV: Found %s hub\n", hub);
	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	pnode_mask = (1 << n_val) - 1;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n",
			n_val, m_val, pnode_mask, gnode_upper, gnode_extra);

	pr_info("UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));

	/* uv_num_possible_blades() is really the hub count */
	pr_info("UV: Found %d blades, %d hubs\n",
			is_uv1_hub() ? uv_num_possible_blades() :
			(uv_num_possible_blades() + 1) / 2,
			uv_num_possible_blades());
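	/*
	 * Example of the arithmetic above: a UV2/UV3 blade carries two
	 * hubs, so e.g. 10 present hubs print as (10 + 1) / 2 = 5
	 * blades, while UV1 hubs map 1:1 onto blades.
	 */
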
	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_node_to_blade);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_cpu_to_blade);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			pnode = (i * 64 + j) & pnode_mask;
			uv_blade_info[blade].pnode = pnode;
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			spin_lock_init(&uv_blade_info[blade].nmi_lock);
			min_pnode = min(pnode, min_pnode);
			max_pnode = max(pnode, max_pnode);
			blade++;
		}
	}

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
			    &sn_region_size, &system_serial_number);
	uv_rtc_init();

	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		/*
		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
		 */
		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
		uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

		uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
		uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
				(m_val == 40 ? 40 : 39) : m_val;
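		/*
		 * UV2.1 quirk as encoded above: the gnode left-shift is
		 * pinned to 39 or 40 rather than m_val, e.g. m_val == 40
		 * keeps 40 while any other m_val falls back to 39.
		 */
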
		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
	}

	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
	}

	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(min_pnode, max_pnode);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	uv_register_nmi_notifier();
	proc_mkdir("sgi_uv", NULL);

	/* register Legacy VGA I/O redirection handler */
	pci_register_set_vga_state(uv_set_vga_state);

	/*
	 * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
	 * EFI is not enabled in the kdump kernel.
	 */
	if (is_kdump_kernel())
		reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);