/*
 * IBM Summit-Specific Code
 *
 * Written By: Matthew Dobson, IBM Corporation
 *
 * Copyright (c) 2003 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
30 #include <linux/init.h>
32 #include <asm/bios_ebda.h>
/*
 * APIC driver for the IBM "Summit" chipset.
 */
37 #include <linux/threads.h>
38 #include <linux/cpumask.h>
39 #include <asm/mpspec.h>
42 #include <asm/fixmap.h>
43 #include <asm/apicdef.h>
45 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/init.h>
48 #include <linux/gfp.h>
49 #include <linux/smp.h>
/*
 * Extract the physical APIC ID from an APIC ID register value:
 * the ID lives in bits 31:24 of the register.
 */
static inline unsigned summit_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}
56 static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
58 default_send_IPI_mask_sequence_logical(mask, vector);
61 static inline void summit_send_IPI_allbutself(int vector)
63 cpumask_t mask = cpu_online_map;
64 cpu_clear(smp_processor_id(), mask);
66 if (!cpus_empty(mask))
67 summit_send_IPI_mask(&mask, vector);
70 static inline void summit_send_IPI_all(int vector)
72 summit_send_IPI_mask(&cpu_online_map, vector);
extern int use_cyclone;

#ifdef CONFIG_X86_SUMMIT_NUMA
extern void setup_summit(void);
#else
/* Without Summit NUMA support the setup hook is a no-op. */
#define setup_summit()	{}
#endif
86 summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
88 if (!strncmp(oem, "IBM ENSW", 8) &&
89 (!strncmp(productid, "VIGIL SMP", 9)
90 || !strncmp(productid, "EXA", 3)
91 || !strncmp(productid, "RUTHLESS SMP", 12))){
92 mark_tsc_unstable("Summit based system");
93 use_cyclone = 1; /*enable cyclone-timer*/
100 /* Hook from generic ACPI tables.c */
101 static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
103 if (!strncmp(oem_id, "IBM", 3) &&
104 (!strncmp(oem_table_id, "SERVIGIL", 8)
105 || !strncmp(oem_table_id, "EXA", 3))){
106 mark_tsc_unstable("Summit based system");
107 use_cyclone = 1; /*enable cyclone-timer*/
/*
 * Header of the firmware-provided Rio Grande table found in the EBDA.
 * Packed: the layout must match the BIOS structure byte-for-byte.
 */
struct rio_table_hdr {
	unsigned char version;      /* Version number of this data structure  */
	                            /* Version 3 adds chassis_num & WP_index  */
	unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
	unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs) */
} __attribute__((packed));
/*
 * Per-device record for one Scalability device (Twister).
 * Packed firmware layout - do not reorder or pad.
 */
struct scal_detail {
	unsigned char node_id;     /* Scalability Node ID */
	unsigned long CBAR;        /* Address of 1MB register space */
	unsigned char port0node;   /* Node ID port connected to: 0xFF=None */
	unsigned char port0port;   /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port1node;   /* Node ID port connected to: 0xFF = None */
	unsigned char port1port;   /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port2node;   /* Node ID port connected to: 0xFF = None */
	unsigned char port2port;   /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
} __attribute__((packed));
/*
 * Per-device record for one RIO I/O device (Cyclone or Winnipeg/WPEG).
 * Packed firmware layout - do not reorder or pad.
 */
struct rio_detail {
	unsigned char node_id;     /* RIO Node ID */
	unsigned long BBAR;        /* Address of 1MB register space */
	unsigned char type;        /* Type of device */
	unsigned char owner_id;    /* For WPEG: Node ID of Cyclone that owns this WPEG*/
	                           /* For CYC:  Node ID of Twister that owns this CYC */
	unsigned char port0node;   /* Node ID port connected to: 0xFF=None */
	unsigned char port0port;   /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port1node;   /* Node ID port connected to: 0xFF=None */
	unsigned char port1port;   /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char first_slot;  /* For WPEG: Lowest slot number below this WPEG */
	                           /* For CYC:  No meaning */
	unsigned char status;      /* For WPEG: Bit 0 = 1 : the XAPIC is used */
	                           /*                = 0 : the XAPIC is not used, ie:*/
	                           /*                      ints fwded to another XAPIC */
	                           /*           Bits1:7 Reserved */
	                           /* For CYC:  Bits0:7 Reserved */
	unsigned char WP_index;    /* For WPEG: WPEG instance index - lower ones have */
	                           /*           lower slot numbers/PCI bus numbers */
	                           /* For CYC:  No meaning */
	unsigned char chassis_num; /* 1 based Chassis number */
	                           /* For LookOut WPEGs this field indicates the */
	                           /* Expansion Chassis #, enumerated from Boot */
	                           /* Node WPEG external port, then Boot Node CYC */
	                           /* external port, then Next Vigil chassis WPEG */
	                           /* external port, etc. */
	                           /* Shared Lookouts have only 1 chassis number (the */
	                           /* first one assigned) */
} __attribute__((packed));
/* Values of the 'type' field in struct rio_detail. */
typedef enum {
	CompatTwister = 0,  /* Compatibility Twister               */
	AltTwister    = 1,  /* Alternate Twister of internal 8-way */
	CompatCyclone = 2,  /* Compatibility Cyclone               */
	AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
	CompatWPEG    = 4,  /* Compatibility WPEG                  */
	AltWPEG       = 5,  /* Second Planar WPEG                  */
	LookOutAWPEG  = 6,  /* LookOut WPEG                        */
	LookOutBWPEG  = 7,  /* LookOut WPEG                        */
} node_type;
175 static inline int is_WPEG(struct rio_detail *rio){
176 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
177 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

#define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
189 static inline const cpumask_t *summit_target_cpus(void)
191 /* CPU_MASK_ALL (0xff) has undefined behaviour with
192 * dest_LowestPrio mode logical clustered apic interrupt routing
193 * Just start on cpu 0. IRQ balancing will spread load
195 return &cpumask_of_cpu(0);
198 static inline unsigned long
199 summit_check_apicid_used(physid_mask_t bitmap, int apicid)
/* we don't use the phys_cpu_present_map to indicate apicid presence */
static inline unsigned long summit_check_apicid_present(int bit)
{
	return 1;
}
210 #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
212 extern u8 cpu_2_logical_apicid[];
214 static inline void summit_init_apic_ldr(void)
216 unsigned long val, id;
218 u8 my_id = (u8)hard_smp_processor_id();
219 u8 my_cluster = (u8)apicid_cluster(my_id);
224 /* Create logical APIC IDs by counting CPUs already in cluster. */
225 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
226 lid = cpu_2_logical_apicid[i];
227 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
231 /* We only have a 4 wide bitmap in cluster mode. If a deranged
232 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
233 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
234 id = my_cluster | (1UL << count);
235 apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
236 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
237 val |= SET_APIC_LOGICAL_ID(id);
238 apic_write(APIC_LDR, val);
/* Always report the boot APIC ID as registered on Summit. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
246 static inline void summit_setup_apic_routing(void)
248 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
/*
 * Map a logical APIC ID to its NUMA node.  Note: the lookup is keyed
 * off the *current* CPU's hardware APIC ID, not the argument.
 */
static inline int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
/* Mapping from cpu number to logical apicid */
static inline int summit_cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	return logical_smp_processor_id();
#endif
}
273 static inline int summit_cpu_present_to_apicid(int mps_cpu)
275 if (mps_cpu < nr_cpu_ids)
276 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
281 static inline physid_mask_t
282 summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
284 /* For clustered we don't have a good way to do this yet - hack */
285 return physids_promote(0x0F);
288 static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
290 return physid_mask_of_physid(0);
/* No port-I/O remapping is needed on Summit. */
static inline void summit_setup_portio_remap(void)
{
}
/* The boot CPU's physical APIC ID is always considered present. */
static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
302 static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
309 num_bits_set = cpus_weight(*cpumask);
310 /* Return id to all */
311 if (num_bits_set >= nr_cpu_ids)
314 * The cpus in the mask must all be on the apic cluster. If are not
315 * on the same apicid cluster return default value of target_cpus():
317 cpu = first_cpu(*cpumask);
318 apicid = summit_cpu_to_logical_apicid(cpu);
320 while (cpus_found < num_bits_set) {
321 if (cpu_isset(cpu, *cpumask)) {
322 int new_apicid = summit_cpu_to_logical_apicid(cpu);
324 if (apicid_cluster(apicid) !=
325 apicid_cluster(new_apicid)) {
326 printk ("%s: Not a valid mask!\n", __func__);
330 apicid = apicid | new_apicid;
338 static inline unsigned int
339 summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
340 const struct cpumask *andmask)
342 int apicid = summit_cpu_to_logical_apicid(0);
343 cpumask_var_t cpumask;
345 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
348 cpumask_and(cpumask, inmask, andmask);
349 cpumask_and(cpumask, cpumask, cpu_online_mask);
350 apicid = summit_cpu_mask_to_apicid(cpumask);
352 free_cpumask_var(cpumask);
/*
 * cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}
static int probe_summit(void)
{
	/* probed later in mptable/ACPI hooks */
	return 0;
}
375 static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
377 /* Careful. Some cpus do not strictly honor the set of cpus
378 * specified in the interrupt destination when using lowest
379 * priority interrupt delivery mode.
381 * In particular there was a hyperthreading cpu observed to
382 * deliver interrupts to the wrong hyperthread when only one
383 * hyperthread was specified in the interrupt desitination.
385 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
388 #ifdef CONFIG_X86_SUMMIT_NUMA
389 static struct rio_table_hdr *rio_table_hdr __initdata;
390 static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
391 static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
393 #ifndef CONFIG_X86_NUMAQ
394 static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
397 static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
399 int twister = 0, node = 0;
400 int i, bus, num_buses;
402 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
403 if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
404 twister = rio_devs[i]->owner_id;
408 if (i == rio_table_hdr->num_rio_dev) {
409 printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
413 for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
414 if (scal_devs[i]->node_id == twister) {
415 node = scal_devs[i]->node_id;
419 if (i == rio_table_hdr->num_scal_dev) {
420 printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
424 switch (rio_devs[wpeg_num]->type) {
427 * The Compatibility Winnipeg controls the 2 legacy buses,
428 * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
429 * a PCI-PCI bridge card is used in either slot: total 5 buses.
435 * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
436 * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
437 * the "extra" buses for each of those slots: total 7 buses.
444 * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
445 * & the "extra" buses for each of those slots: total 9 buses.
450 printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
454 for (bus = last_bus; bus < last_bus + num_buses; bus++)
455 mp_bus_id_to_node[bus] = node;
459 static int __init build_detail_arrays(void)
462 int i, scal_detail_size, rio_detail_size;
464 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
465 printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
469 switch (rio_table_hdr->version) {
471 printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
474 scal_detail_size = 11;
475 rio_detail_size = 13;
478 scal_detail_size = 12;
479 rio_detail_size = 15;
483 ptr = (unsigned long)rio_table_hdr + 3;
484 for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
485 scal_devs[i] = (struct scal_detail *)ptr;
487 for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
488 rio_devs[i] = (struct rio_detail *)ptr;
493 void __init setup_summit(void)
496 unsigned short offset;
497 int i, next_wpeg, next_bus = 0;
499 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
500 ptr = get_bios_ebda();
501 ptr = (unsigned long)phys_to_virt(ptr);
503 rio_table_hdr = NULL;
506 /* The block id is stored in the 2nd word */
507 if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
508 /* set the pointer past the offset & block id */
509 rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
512 /* The next offset is stored in the 1st word. 0 means no more */
513 offset = *((unsigned short *)(ptr + offset));
515 if (!rio_table_hdr) {
516 printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
520 if (!build_detail_arrays())
523 /* The first Winnipeg we're looking for has an index of 0 */
526 for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
527 if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
528 /* It's the Winnipeg we're looking for! */
529 next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
535 * If we go through all Rio devices and don't find one with
536 * the next index, it means we've found all the Winnipegs,
537 * and thus all the PCI buses.
539 if (i == rio_table_hdr->num_rio_dev)
541 } while (next_wpeg != 0);
545 struct genapic apic_summit = {
548 .probe = probe_summit,
549 .acpi_madt_oem_check = summit_acpi_madt_oem_check,
550 .apic_id_registered = summit_apic_id_registered,
552 .irq_delivery_mode = dest_LowestPrio,
553 /* logical delivery broadcast to all CPUs: */
556 .target_cpus = summit_target_cpus,
558 .dest_logical = APIC_DEST_LOGICAL,
559 .check_apicid_used = summit_check_apicid_used,
560 .check_apicid_present = summit_check_apicid_present,
562 .vector_allocation_domain = summit_vector_allocation_domain,
563 .init_apic_ldr = summit_init_apic_ldr,
565 .ioapic_phys_id_map = summit_ioapic_phys_id_map,
566 .setup_apic_routing = summit_setup_apic_routing,
567 .multi_timer_check = NULL,
568 .apicid_to_node = summit_apicid_to_node,
569 .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
570 .cpu_present_to_apicid = summit_cpu_present_to_apicid,
571 .apicid_to_cpu_present = summit_apicid_to_cpu_present,
572 .setup_portio_remap = NULL,
573 .check_phys_apicid_present = summit_check_phys_apicid_present,
574 .enable_apic_mode = NULL,
575 .phys_pkg_id = summit_phys_pkg_id,
576 .mps_oem_check = summit_mps_oem_check,
578 .get_apic_id = summit_get_apic_id,
580 .apic_id_mask = 0xFF << 24,
582 .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
583 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
585 .send_IPI_mask = summit_send_IPI_mask,
586 .send_IPI_mask_allbutself = NULL,
587 .send_IPI_allbutself = summit_send_IPI_allbutself,
588 .send_IPI_all = summit_send_IPI_all,
589 .send_IPI_self = default_send_IPI_self,
592 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
593 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
595 .wait_for_init_deassert = default_wait_for_init_deassert,
597 .smp_callin_clear_local_apic = NULL,
598 .store_NMI_vector = NULL,
599 .inquire_remote_apic = default_inquire_remote_apic,
601 .read = native_apic_mem_read,
602 .write = native_apic_mem_write,
603 .icr_read = native_apic_icr_read,
604 .icr_write = native_apic_icr_write,
605 .wait_icr_idle = native_apic_wait_icr_idle,
606 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,