/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>

#include <acpi/acpi_bus.h>

#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>

#include <asm/hw_irq.h>
#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
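/*
 * Usage sketch (the same pattern appears throughout this file): walk
 * every IO-APIC pin an irq is routed to, e.g.
 *
 *	struct irq_pin_list *entry;
 *
 *	for_each_irq_pin(entry, cfg->irq_2_pin)
 *		io_apic_sync(entry);
 */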
/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

static int nr_irqs_gsi = NR_IRQS_LEGACY;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *get_one_free_irq_2_pin(int node)
{
	struct irq_pin_list *pin;

	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);

	return pin;
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}
#ifdef CONFIG_SPARSE_IRQ
struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = NULL;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (desc)
		cfg = get_irq_desc_chip_data(desc);

	return cfg;
}

static struct irq_cfg *get_one_free_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
							  GFP_ATOMIC, node)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		}
	}

	return cfg;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;

	cfg = get_irq_desc_chip_data(desc);
	if (!cfg) {
		cfg = get_one_free_irq_cfg(node);
		desc->chip_data = cfg;
		if (!cfg) {
			printk(KERN_ERR "can not alloc irq_cfg\n");
			BUG_ON(1);
		}
	}

	return 0;
}
/* for move_irq_desc */
static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node)
{
	struct irq_pin_list *old_entry, *head, *tail, *entry;

	cfg->irq_2_pin = NULL;
	old_entry = old_cfg->irq_2_pin;
	if (!old_entry)
		return;

	entry = get_one_free_irq_2_pin(node);
	if (!entry)
		return;

	entry->apic	= old_entry->apic;
	entry->pin	= old_entry->pin;
	head		= entry;
	tail		= entry;
	old_entry	= old_entry->next;
	while (old_entry) {
		entry = get_one_free_irq_2_pin(node);
		if (!entry) {
			entry = head;
			while (entry) {
				head = entry->next;
				kfree(entry);
				entry = head;
			}
			/* still use the old one */
			return;
		}
		entry->apic	= old_entry->apic;
		entry->pin	= old_entry->pin;
		tail->next	= entry;
		tail		= entry;
		old_entry	= old_entry->next;
	}

	tail->next = NULL;
	cfg->irq_2_pin = head;
}

static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry, *next;

	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
		return;

	entry = old_cfg->irq_2_pin;

	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}
	old_cfg->irq_2_pin = NULL;
}

void arch_init_copy_chip_data(struct irq_desc *old_desc,
			      struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;
	struct irq_cfg *old_cfg;

	cfg = get_one_free_irq_cfg(node);

	if (!cfg)
		return;

	desc->chip_data = cfg;

	old_cfg = old_desc->chip_data;

	if (!old_cfg)
		return;

	cfg->vector = old_cfg->vector;
	cfg->move_in_progress = old_cfg->move_in_progress;
	cpumask_copy(cfg->domain, old_cfg->domain);
	cpumask_copy(cfg->old_domain, old_cfg->old_domain);

	init_copy_irq_2_pin(old_cfg, cfg, node);
}

static void free_irq_cfg(struct irq_cfg *cfg)
{
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
	struct irq_cfg *old_cfg, *cfg;

	old_cfg = get_irq_desc_chip_data(old_desc);
	cfg = get_irq_desc_chip_data(desc);

	if (old_cfg) {
		free_irq_2_pin(old_cfg, cfg);
		free_irq_cfg(old_cfg);
		old_desc->chip_data = NULL;
	}
}
/* end for move_irq_desc */
#else
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
#endif

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
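/*
 * Example (a minimal sketch, mirroring the pattern print_IO_APIC()
 * uses below): the IO-APIC is a banked device, so every access goes
 * through the index/data window above and must be serialized with
 * ioapic_lock:
 *
 *	unsigned long flags;
 *	union IO_APIC_reg_01 reg_01;
 *
 *	raw_spin_lock_irqsave(&ioapic_lock, flags);
 *	reg_01.raw = io_apic_read(apic, 1);	// register 0x01: version
 *	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 */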
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * rest of the entry.
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = get_one_free_irq_2_pin(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
				node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = get_irq_desc_chip_data(desc);
	unsigned long flags;

	BUG_ON(!cfg);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = get_irq_desc_chip_data(desc);
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unmask_IO_APIC_irq_desc(desc);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
	int apic;
	struct IO_APIC_route_entry **ioapic_entries;

	ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
				GFP_ATOMIC);
	if (!ioapic_entries)
		return 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_ATOMIC);
		if (!ioapic_entries[apic])
			goto nomem;
	}

	return ioapic_entries;

nomem:
	while (--apic >= 0)
		kfree(ioapic_entries[apic]);
	kfree(ioapic_entries);

	return 0;
}

/*
 * Saves all the IO-APIC RTE's
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
	}

	return 0;
}

/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			break;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_entries[apic][pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					ioapic_entries[apic][pin]);
	}

	return 0;
}

void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic;

	for (apic = 0; apic < nr_ioapics; apic++)
		kfree(ioapic_entries[apic]);

	kfree(ioapic_entries);
}
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)
/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)
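/*
 * For example (a sketch, not tied to any particular board): for an
 * EISA-conforming entry on IRQ 10, default_EISA_trigger() reads port
 * 0x4d1 (0x4d0 + (10 >> 3)) and tests bit 2 (10 & 7); a set bit means
 * level triggered, a clear bit means edge triggered.
 */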
/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
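/*
 * Summary of the conforming defaults above (trigger: 0 = edge,
 * 1 = level; polarity: 0 = active high, 1 = active low):
 *
 *	bus	trigger		polarity
 *	ISA	edge		high
 *	EISA	from ELCR	high
 *	PCI	level		low
 *	MCA	level		high
 */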
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
		polarity = 0;
		break;
	case 2: /* reserved */
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	case 3: /* low active */
		polarity = 1;
		break;
	default: /* invalid */
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, ie. bus-type dependent */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
			/* set before the switch */
			break;
		case MP_BUS_EISA: /* EISA pin */
			trigger = default_EISA_trigger(idx);
			break;
		case MP_BUS_PCI: /* PCI pin */
			/* set before the switch */
			break;
		case MP_BUS_MCA: /* MCA pin */
			trigger = default_MCA_trigger(idx);
			break;
		default:
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
#endif
		break;
	case 1: /* edge */
		trigger = 0;
		break;
	case 2: /* reserved */
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	case 3: /* level */
		trigger = 1;
		break;
	default: /* invalid */
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}
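	/*
	 * Worked example (hypothetical numbers): with gsi_base = 0 and
	 * pin = 3 on a PCI bus, gsi = 3 < NR_IRQS_LEGACY, so the legacy
	 * IRQ range is left alone and the pin maps to gsi_top + 3; a
	 * gsi of, say, 40 maps straight through to irq 40.
	 */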
#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
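/*
 * Worked example for the level spreading above (illustrative numbers
 * only): the local APIC derives the interrupt priority level from
 * vector >> 4, so stepping the search by 8 with a per-wrap offset
 * lands successive allocations on e.g. 0x31, 0x39, 0x41, ... rather
 * than packing a full 16-vector level before moving on.
 */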
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		cfg = get_irq_desc_chip_data(desc);

		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}
static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
int setup_ioapic_entry(int apic_id, int irq,
		       struct IO_APIC_route_entry *entry,
		       unsigned int destination, int trigger,
		       int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = get_irq_desc_chip_data(desc);

	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, desc, trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(apic_id, pin, entry);
}

static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];
static void __init setup_IO_APIC_irqs(void)
{
	int apic_id, pin, idx, irq;
	int notcon = 0;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int node = cpu_to_node(0);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}

		irq = pin_2_irq(idx, apic_id, pin);

		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
				apic->multi_timer_check(apic_id, irq))
			continue;

		desc = irq_to_desc_alloc_node(irq, node);
		if (!desc) {
			printk(KERN_INFO "can not get irq_desc for %d\n", irq);
			continue;
		}
		cfg = get_irq_desc_chip_data(desc);
		add_pin_to_irq_node(cfg, node, apic_id, pin);
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
		setup_IO_APIC_irq(apic_id, pin, irq, desc,
				irq_trigger(idx), irq_polarity(idx));
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}
/*
 * for a gsi that is not in the first ioapic
 * but could not use acpi_register_gsi()
 * like some special sci in IBM x3330
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq;
	int node = cpu_to_node(0);
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);
#ifdef CONFIG_SPARSE_IRQ
	desc = irq_to_desc(irq);
	if (desc)
		return;
#endif
	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return;
	}

	cfg = get_irq_desc_chip_data(desc);
	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

	setup_IO_APIC_irq(apic_id, pin, irq, desc,
			irq_trigger(idx), irq_polarity(idx));
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			  " Stat Dmod Deli Vect:\n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_desc(irq, desc) {
		struct irq_pin_list *entry;

		cfg = get_irq_desc_chip_data(desc);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}
__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

fs_initcall(print_ICs);
/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is a little more complex (need to
	 * configure both the IOAPIC RTE as well as the interrupt-remapping
	 * table entry).  As this gets called during crash dump, keep this
	 * simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	physid_mask_t tmp;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;
	struct irq_cfg *cfg;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	cfg = irq_cfg(irq);
	__unmask_IO_APIC_irq(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}
/*
 * Either sets desc->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves desc->affinity untouched.
 */
unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
		  unsigned int *dest_id)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	irq = desc->irq;
	cfg = get_irq_desc_chip_data(desc);
	if (assign_irq_vector(irq, cfg, mask))
		return -1;

	cpumask_copy(desc->affinity, mask);

	*dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
	return 0;
}

static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	unsigned int irq;
	int ret = -1;

	irq = desc->irq;
	cfg = get_irq_desc_chip_data(desc);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = set_desc_affinity(desc, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, cfg);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return ret;
}

static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	return set_ioapic_affinity_irq_desc(desc, mask);
}
#ifdef CONFIG_INTR_REMAP

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 */
static int
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	struct irte irte;
	unsigned int dest;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	irq = desc->irq;
	if (get_irte(irq, &irte))
		return -1;

	cfg = get_irq_desc_chip_data(desc);
	if (assign_irq_vector(irq, cfg, mask))
		return -1;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modified the IRTE and flushes the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(desc->affinity, mask);

	return 0;
}

/*
 * Migrates the IRQ destination in the process context.
 */
static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
					   const struct cpumask *mask)
{
	return migrate_ioapic_irq_desc(desc, mask);
}
static int set_ir_ioapic_affinity_irq(unsigned int irq,
				      const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#else
static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
						  const struct cpumask *mask)
{
	return 0;
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Lets clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__get_cpu_var(vector_irq)[vector] = -1;
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
{
	struct irq_desc *desc = *descp;
	struct irq_cfg *cfg = get_irq_desc_chip_data(desc);
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_desc **descp)
{
	__irq_complete_move(descp, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = get_irq_desc_chip_data(desc);

	if (!cfg)
		return;

	__irq_complete_move(&desc, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
static void ack_apic_edge(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_complete_move(&desc);
	move_native_irq(irq);
	ack_APIC_irq();
}

atomic_t irq_mis_count;

/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *
 * Some of the Intel ICH Specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is an error with documentation and these ICH chips
 * use io-apic's of version 0x20.
 *
 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during this.
 */
2562 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2564 struct irq_pin_list *entry;
2566 for_each_irq_pin(entry, cfg->irq_2_pin) {
2567 if (mp_ioapics[entry->apic].apicver >= 0x20) {
2569 * Intr-remapping uses the pin number as the virtual vector
2570 * in the RTE. The actual vector is programmed in the
2571 * intr-remapping table entry. Hence for the io-apic
2572 * EOI we use the pin number.
2574 if (irq_remapped(irq))
2575 io_apic_eoi(entry->apic, entry->pin);
2577 io_apic_eoi(entry->apic, cfg->vector);
2579 __mask_and_edge_IO_APIC_irq(entry);
2580 __unmask_and_level_IO_APIC_irq(entry);
2585 static void eoi_ioapic_irq(struct irq_desc *desc)
2587 struct irq_cfg *cfg;
2588 unsigned long flags;
2592 cfg = get_irq_desc_chip_data(desc);
2594 raw_spin_lock_irqsave(&ioapic_lock, flags);
2595 __eoi_ioapic_irq(irq, cfg);
2596 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2599 static void ack_apic_level(unsigned int irq)
2601 struct irq_desc *desc = irq_to_desc(irq);
2604 struct irq_cfg *cfg;
2605 int do_unmask_irq = 0;
2607 irq_complete_move(&desc);
2608 #ifdef CONFIG_GENERIC_PENDING_IRQ
2609 /* If we are moving the irq we need to mask it */
2610 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2612 mask_IO_APIC_irq_desc(desc);
2617 * It appears there is an erratum which affects at least version 0x11
2618 * of I/O APIC (that's the 82093AA and cores integrated into various
2619 * chipsets). Under certain conditions a level-triggered interrupt is
2620 * erroneously delivered as edge-triggered one but the respective IRR
2621 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2622 * message but it will never arrive and further interrupts are blocked
2623 * from the source. The exact reason is so far unknown, but the
2624 * phenomenon was observed when two consecutive interrupt requests
2625 * from a given source get delivered to the same CPU and the source is
2626 * temporarily disabled in between.
2628 * A workaround is to simulate an EOI message manually. We achieve it
2629 * by setting the trigger mode to edge and then to level when the edge
2630 * trigger mode gets detected in the TMR of a local APIC for a
2631 * level-triggered interrupt. We mask the source for the time of the
2632 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2633 * The idea is from Manfred Spraul. --macro
2635 * Also, when a cpu goes offline, fixup_irqs() will forward
2636 * any unhandled interrupt on the offlined cpu to the new cpu
2637 * destination that is handling the corresponding interrupt. This
2638 * interrupt forwarding is done via IPIs. Hence, in this case a
2639 * level-triggered io-apic interrupt will also be seen as an edge
2640 * interrupt in the IRR, and we can't rely on the cpu's EOI
2641 * being broadcast to the IO-APICs to clear the remote IRR
2642 * corresponding to the level-triggered interrupt. Hence on IO-APICs
2643 * supporting the EOI register, we do an explicit EOI to clear the
2644 * remote IRR, and on IO-APICs which don't have an EOI register,
2645 * we use the above logic (mask+edge followed by unmask+level) from
2646 * Manfred Spraul to clear the remote IRR.
2648 cfg = get_irq_desc_chip_data(desc);
2650 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
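/*
 * The TMR shares the IRR's layout of eight 32-bit registers spaced
 * 0x10 apart: (i & ~0x1f) >> 1 is just (i / 32) * 0x10, and the bit
 * test below checks whether vector i was latched as level-triggered
 * by the local APIC.
 */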
2653 * We must acknowledge the irq before we move it or the acknowledge will
2654 * not propagate properly.
2659 * Tail end of clearing the remote IRR bit (either by delivering the EOI
2660 * message via the io-apic EOI register write or simulating it using
2661 * mask+edge followed by unmask+level logic) manually when the
2662 * level-triggered interrupt is seen as an edge-triggered interrupt at the cpu.
2665 if (!(v & (1 << (i & 0x1f)))) {
2666 atomic_inc(&irq_mis_count);
2668 eoi_ioapic_irq(desc);
2671 /* Now we can move and re-enable the irq */
2672 if (unlikely(do_unmask_irq)) {
2673 /* Only migrate the irq if the ack has been received.
2675 * On rare occasions the broadcast level triggered ack gets
2676 * delayed going to ioapics, and if we reprogram the
2677 * vector while Remote IRR is still set the irq will never fire again.
2680 * To prevent this scenario we read the Remote IRR bit
2681 * of the ioapic. This has two effects.
2682 * - On any sane system the read of the ioapic will
2683 * flush writes (and acks) going to the ioapic from the cpu.
2685 * - We get to see if the ACK has actually been delivered.
2687 * Based on failed experiments of reprogramming the
2688 * ioapic entry from outside of irq context starting
2689 * with masking the ioapic entry and then polling until
2690 * Remote IRR was clear before reprogramming the
2691 * ioapic I don't trust the Remote IRR bit to be
2692 * completely accurate.
2694 * However there appears to be no other way to plug
2695 * this race, so if the Remote IRR bit is not
2696 * accurate and is causing problems then it is a hardware bug
2697 * and you can go talk to the chipset vendor about it.
2699 cfg = get_irq_desc_chip_data(desc);
2700 if (!io_apic_level_ack_pending(cfg))
2701 move_masked_irq(irq);
2702 unmask_IO_APIC_irq_desc(desc);
2706 #ifdef CONFIG_INTR_REMAP
2707 static void ir_ack_apic_edge(unsigned int irq)
2712 static void ir_ack_apic_level(unsigned int irq)
2714 struct irq_desc *desc = irq_to_desc(irq);
2717 eoi_ioapic_irq(desc);
2719 #endif /* CONFIG_INTR_REMAP */
2721 static struct irq_chip ioapic_chip __read_mostly = {
2723 .startup = startup_ioapic_irq,
2724 .mask = mask_IO_APIC_irq,
2725 .unmask = unmask_IO_APIC_irq,
2726 .ack = ack_apic_edge,
2727 .eoi = ack_apic_level,
2729 .set_affinity = set_ioapic_affinity_irq,
2731 .retrigger = ioapic_retrigger_irq,
2734 static struct irq_chip ir_ioapic_chip __read_mostly = {
2735 .name = "IR-IO-APIC",
2736 .startup = startup_ioapic_irq,
2737 .mask = mask_IO_APIC_irq,
2738 .unmask = unmask_IO_APIC_irq,
2739 #ifdef CONFIG_INTR_REMAP
2740 .ack = ir_ack_apic_edge,
2741 .eoi = ir_ack_apic_level,
2743 .set_affinity = set_ir_ioapic_affinity_irq,
2746 .retrigger = ioapic_retrigger_irq,
2749 static inline void init_IO_APIC_traps(void)
2752 struct irq_desc *desc;
2753 struct irq_cfg *cfg;
2756 * NOTE! The local APIC isn't very good at handling
2757 * multiple interrupts at the same interrupt level.
2758 * As the interrupt level is determined by taking the
2759 * vector number and shifting that right by 4, we
2760 * want to spread these out a bit so that they don't
2761 * all fall in the same interrupt level.
2763 * Also, we've got to be careful not to trash gate
2764 * 0x80, because int 0x80 is hm, kind of importantish. ;)
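/*
 * (For example: vectors 0x31 and 0x3e would contend for priority
 * level 3, while 0x41 lands in level 4.)
 */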
2766 for_each_irq_desc(irq, desc) {
2767 cfg = get_irq_desc_chip_data(desc);
2768 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2770 * Hmm.. We don't have an entry for this,
2771 * so default to an old-fashioned 8259
2772 * interrupt if we can..
2774 if (irq < legacy_pic->nr_legacy_irqs)
2775 legacy_pic->make_irq(irq);
2777 /* Strange. Oh, well.. */
2778 desc->chip = &no_irq_chip;
2784 * The local APIC irq-chip implementation:
2787 static void mask_lapic_irq(unsigned int irq)
2791 v = apic_read(APIC_LVT0);
2792 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2795 static void unmask_lapic_irq(unsigned int irq)
2799 v = apic_read(APIC_LVT0);
2800 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2803 static void ack_lapic_irq(unsigned int irq)
2808 static struct irq_chip lapic_chip __read_mostly = {
2809 .name = "local-APIC",
2810 .mask = mask_lapic_irq,
2811 .unmask = unmask_lapic_irq,
2812 .ack = ack_lapic_irq,
2815 static void lapic_register_intr(int irq, struct irq_desc *desc)
2817 desc->status &= ~IRQ_LEVEL;
2818 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2822 static void __init setup_nmi(void)
2825 * Dirty trick to enable the NMI watchdog ...
2826 * We put the 8259A master into AEOI mode and
2827 * unmask on all local APICs LVT0 as NMI.
2829 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2830 * is from Maciej W. Rozycki - so we do not have to EOI from
2831 * the NMI handler or the timer interrupt.
2833 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2835 enable_NMI_through_LVT0();
2837 apic_printk(APIC_VERBOSE, " done.\n");
2841 * This looks a bit hackish but it's about the only way of sending
2842 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2843 * not support the ExtINT mode, unfortunately. We need to send these
2844 * cycles as some i82489DX-based boards have glue logic that keeps the
2845 * 8259A interrupt line asserted until INTA. --macro
2847 static inline void __init unlock_ExtINT_logic(void)
2850 struct IO_APIC_route_entry entry0, entry1;
2851 unsigned char save_control, save_freq_select;
2853 pin = find_isa_irq_pin(8, mp_INT);
2858 apic = find_isa_irq_apic(8, mp_INT);
2864 entry0 = ioapic_read_entry(apic, pin);
2865 clear_IO_APIC_pin(apic, pin);
2867 memset(&entry1, 0, sizeof(entry1));
2869 entry1.dest_mode = 0; /* physical delivery */
2870 entry1.mask = 0; /* unmask IRQ now */
2871 entry1.dest = hard_smp_processor_id();
2872 entry1.delivery_mode = dest_ExtINT;
2873 entry1.polarity = entry0.polarity;
2877 ioapic_write_entry(apic, pin, entry1);
2879 save_control = CMOS_READ(RTC_CONTROL);
2880 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2881 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2883 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
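/*
 * Rate select 6 programs the MC146818 divider for a 1.024 kHz
 * periodic interrupt; with RTC_PIE set it then pulses IRQ 8, which
 * is enough to generate the INTA cycles we need here.
 */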
2888 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2892 CMOS_WRITE(save_control, RTC_CONTROL);
2893 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2894 clear_IO_APIC_pin(apic, pin);
2896 ioapic_write_entry(apic, pin, entry0);
2899 static int disable_timer_pin_1 __initdata;
2900 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2901 static int __init disable_timer_pin_setup(char *arg)
2903 disable_timer_pin_1 = 1;
2906 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2908 int timer_through_8259 __initdata;
2911 * This code may look a bit paranoid, but it's supposed to cooperate with
2912 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2913 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2914 * fanatically on his truly buggy board.
2916 * FIXME: really need to revamp this for all platforms.
2918 static inline void __init check_timer(void)
2920 struct irq_desc *desc = irq_to_desc(0);
2921 struct irq_cfg *cfg = get_irq_desc_chip_data(desc);
2922 int node = cpu_to_node(0);
2923 int apic1, pin1, apic2, pin2;
2924 unsigned long flags;
2927 local_irq_save(flags);
2930 * get/set the timer IRQ vector:
2932 legacy_pic->mask(0);
2933 assign_irq_vector(0, cfg, apic->target_cpus());
2936 * As IRQ0 is to be enabled in the 8259A, the virtual
2937 * wire has to be disabled in the local APIC. Also
2938 * timer interrupts need to be acknowledged manually in
2939 * the 8259A for the i82489DX when using the NMI
2940 * watchdog as that APIC treats NMIs as level-triggered.
2941 * The AEOI mode will finish them in the 8259A automatically.
2944 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2945 legacy_pic->init(1);
2946 #ifdef CONFIG_X86_32
2950 ver = apic_read(APIC_LVR);
2951 ver = GET_APIC_VERSION(ver);
2952 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2956 pin1 = find_isa_irq_pin(0, mp_INT);
2957 apic1 = find_isa_irq_apic(0, mp_INT);
2958 pin2 = ioapic_i8259.pin;
2959 apic2 = ioapic_i8259.apic;
2961 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2962 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2963 cfg->vector, apic1, pin1, apic2, pin2);
2966 * Some BIOS writers are clueless and report the ExtINTA
2967 * I/O APIC input from the cascaded 8259A as the timer
2968 * interrupt input. So just in case, if only one pin
2969 * was found above, try it both directly and through the 8259A.
2973 if (intr_remapping_enabled)
2974 panic("BIOS bug: timer not connected to IO-APIC");
2978 } else if (pin2 == -1) {
2985 * Ok, does IRQ0 through the IOAPIC work?
2988 add_pin_to_irq_node(cfg, node, apic1, pin1);
2989 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2991 /* For edge-triggered interrupts, setup_IO_APIC_irq already
2992 * leaves the pin unmasked, so we only need to unmask it here
2993 * if the pin is level-triggered.
2994 * (Do we really have a level-triggered timer?)
2997 idx = find_irq_entry(apic1, pin1, mp_INT);
2998 if (idx != -1 && irq_trigger(idx))
2999 unmask_IO_APIC_irq_desc(desc);
3001 if (timer_irq_works()) {
3002 if (nmi_watchdog == NMI_IO_APIC) {
3004 legacy_pic->unmask(0);
3006 if (disable_timer_pin_1 > 0)
3007 clear_IO_APIC_pin(0, pin1);
3010 if (intr_remapping_enabled)
3011 panic("timer doesn't work through Interrupt-remapped IO-APIC");
3012 local_irq_disable();
3013 clear_IO_APIC_pin(apic1, pin1);
3015 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
3016 "8254 timer not connected to IO-APIC\n");
3018 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
3019 "(IRQ0) through the 8259A ...\n");
3020 apic_printk(APIC_QUIET, KERN_INFO
3021 "..... (found apic %d pin %d) ...\n", apic2, pin2);
3023 * legacy devices should be connected to IO APIC #0
3025 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
3026 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
3027 legacy_pic->unmask(0);
3028 if (timer_irq_works()) {
3029 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
3030 timer_through_8259 = 1;
3031 if (nmi_watchdog == NMI_IO_APIC) {
3032 legacy_pic->mask(0);
3034 legacy_pic->unmask(0);
3039 * Cleanup, just in case ...
3041 local_irq_disable();
3042 legacy_pic->mask(0);
3043 clear_IO_APIC_pin(apic2, pin2);
3044 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
3047 if (nmi_watchdog == NMI_IO_APIC) {
3048 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
3049 "through the IO-APIC - disabling NMI Watchdog!\n");
3050 nmi_watchdog = NMI_NONE;
3052 #ifdef CONFIG_X86_32
3056 apic_printk(APIC_QUIET, KERN_INFO
3057 "...trying to set up timer as Virtual Wire IRQ...\n");
3059 lapic_register_intr(0, desc);
3060 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3061 legacy_pic->unmask(0);
3063 if (timer_irq_works()) {
3064 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3067 local_irq_disable();
3068 legacy_pic->mask(0);
3069 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3070 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3072 apic_printk(APIC_QUIET, KERN_INFO
3073 "...trying to set up timer as ExtINT IRQ...\n");
3075 legacy_pic->init(0);
3076 legacy_pic->make_irq(0);
3077 apic_write(APIC_LVT0, APIC_DM_EXTINT);
3079 unlock_ExtINT_logic();
3081 if (timer_irq_works()) {
3082 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3085 local_irq_disable();
3086 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3087 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3088 "report. Then try booting with the 'noapic' option.\n");
3090 local_irq_restore(flags);
3094 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
3095 * to devices. However there may be an I/O APIC pin available for
3096 * this interrupt regardless. The pin may be left unconnected, but
3097 * typically it will be reused as an ExtINT cascade interrupt for
3098 * the master 8259A. In the MPS case such a pin will normally be
3099 * reported as an ExtINT interrupt in the MP table. With ACPI
3100 * there is no provision for ExtINT interrupts, and in the absence
3101 * of an override it would be treated as an ordinary ISA I/O APIC
3102 * interrupt, that is edge-triggered and unmasked by default. We
3103 * used to do this, but it caused problems on some systems because
3104 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
3105 * the same ExtINT cascade interrupt to drive the local APIC of the
3106 * bootstrap processor. Therefore we refrain from routing IRQ2 to
3107 * the I/O APIC in all cases now. No actual device should request
3108 * it anyway. --macro
3110 #define PIC_IRQS (1UL << PIC_CASCADE_IR)
3112 void __init setup_IO_APIC(void)
3116 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3118 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
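/*
 * With a legacy PIC present this evaluates to ~(1UL << 2): every IRQ
 * except the 8259A cascade (IRQ2) is handled via the IO-APIC.
 */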
3120 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
3122 * Set up IO-APIC IRQ routing.
3124 x86_init.mpparse.setup_ioapic_ids();
3127 setup_IO_APIC_irqs();
3128 init_IO_APIC_traps();
3129 if (legacy_pic->nr_legacy_irqs)
3134 * Called after all the initialization is done. If we didn't find any
3135 * APIC bugs then we can allow the modify fast path.
3138 static int __init io_apic_bug_finalize(void)
3140 if (sis_apic_bug == -1)
3145 late_initcall(io_apic_bug_finalize);
3147 struct sysfs_ioapic_data {
3148 struct sys_device dev;
3149 struct IO_APIC_route_entry entry[0];
3151 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
3153 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
3155 struct IO_APIC_route_entry *entry;
3156 struct sysfs_ioapic_data *data;
3159 data = container_of(dev, struct sysfs_ioapic_data, dev);
3160 entry = data->entry;
3161 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
3162 *entry = ioapic_read_entry(dev->id, i);
3167 static int ioapic_resume(struct sys_device *dev)
3169 struct IO_APIC_route_entry *entry;
3170 struct sysfs_ioapic_data *data;
3171 unsigned long flags;
3172 union IO_APIC_reg_00 reg_00;
3175 data = container_of(dev, struct sysfs_ioapic_data, dev);
3176 entry = data->entry;
3178 raw_spin_lock_irqsave(&ioapic_lock, flags);
3179 reg_00.raw = io_apic_read(dev->id, 0);
3180 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3181 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3182 io_apic_write(dev->id, 0, reg_00.raw);
3184 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3185 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3186 ioapic_write_entry(dev->id, i, entry[i]);
3191 static struct sysdev_class ioapic_sysdev_class = {
3193 .suspend = ioapic_suspend,
3194 .resume = ioapic_resume,
3197 static int __init ioapic_init_sysfs(void)
3199 struct sys_device *dev;
3202 error = sysdev_class_register(&ioapic_sysdev_class);
3206 for (i = 0; i < nr_ioapics; i++) {
3207 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3208 * sizeof(struct IO_APIC_route_entry);
3209 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3210 if (!mp_ioapic_data[i]) {
3211 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3214 dev = &mp_ioapic_data[i]->dev;
3216 dev->cls = &ioapic_sysdev_class;
3217 error = sysdev_register(dev);
3219 kfree(mp_ioapic_data[i]);
3220 mp_ioapic_data[i] = NULL;
3221 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3229 device_initcall(ioapic_init_sysfs);
3232 * Dynamic irq allocation and deallocation
3234 unsigned int create_irq_nr(unsigned int irq_want, int node)
3236 /* Allocate an unused irq */
3239 unsigned long flags;
3240 struct irq_cfg *cfg_new = NULL;
3241 struct irq_desc *desc_new = NULL;
3244 if (irq_want < nr_irqs_gsi)
3245 irq_want = nr_irqs_gsi;
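/*
 * Dynamically allocated irqs start above the GSI range, so they can
 * never collide with an IO-APIC pin's 1:1 irq mapping.
 */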
3247 raw_spin_lock_irqsave(&vector_lock, flags);
3248 for (new = irq_want; new < nr_irqs; new++) {
3249 desc_new = irq_to_desc_alloc_node(new, node);
3251 printk(KERN_INFO "can not get irq_desc for %d\n", new);
3254 cfg_new = get_irq_desc_chip_data(desc_new);
3256 if (cfg_new->vector != 0)
3259 desc_new = move_irq_desc(desc_new, node);
3260 cfg_new = get_irq_desc_chip_data(desc_new);
3262 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3266 raw_spin_unlock_irqrestore(&vector_lock, flags);
3269 dynamic_irq_init_keep_chip_data(irq);
3274 int create_irq(void)
3276 int node = cpu_to_node(0);
3277 unsigned int irq_want;
3280 irq_want = nr_irqs_gsi;
3281 irq = create_irq_nr(irq_want, node);
3289 void destroy_irq(unsigned int irq)
3291 unsigned long flags;
3293 dynamic_irq_cleanup_keep_chip_data(irq);
3296 raw_spin_lock_irqsave(&vector_lock, flags);
3297 __clear_irq_vector(irq, get_irq_chip_data(irq));
3298 raw_spin_unlock_irqrestore(&vector_lock, flags);
3302 * MSI message composition
3304 #ifdef CONFIG_PCI_MSI
3305 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3306 struct msi_msg *msg, u8 hpet_id)
3308 struct irq_cfg *cfg;
3316 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3320 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3322 if (irq_remapped(irq)) {
3327 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3328 BUG_ON(ir_index == -1);
3330 prepare_irte(&irte, cfg->vector, dest);
3332 /* Set source-id of interrupt request */
3334 set_msi_sid(&irte, pdev);
3336 set_hpet_sid(&irte, hpet_id);
3338 modify_irte(irq, &irte);
3340 msg->address_hi = MSI_ADDR_BASE_HI;
3341 msg->data = sub_handle;
3342 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3344 MSI_ADDR_IR_INDEX1(ir_index) |
3345 MSI_ADDR_IR_INDEX2(ir_index);
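/*
 * The 16-bit IRTE handle does not fit into a single field: bits 0-14
 * go into IR_INDEX1 and bit 15 into IR_INDEX2 of the
 * remappable-format MSI address.
 */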
3347 if (x2apic_enabled())
3348 msg->address_hi = MSI_ADDR_BASE_HI |
3349 MSI_ADDR_EXT_DEST_ID(dest);
3351 msg->address_hi = MSI_ADDR_BASE_HI;
3355 ((apic->irq_dest_mode == 0) ?
3356 MSI_ADDR_DEST_MODE_PHYSICAL:
3357 MSI_ADDR_DEST_MODE_LOGICAL) |
3358 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3359 MSI_ADDR_REDIRECTION_CPU:
3360 MSI_ADDR_REDIRECTION_LOWPRI) |
3361 MSI_ADDR_DEST_ID(dest);
3364 MSI_DATA_TRIGGER_EDGE |
3365 MSI_DATA_LEVEL_ASSERT |
3366 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3367 MSI_DATA_DELIVERY_FIXED:
3368 MSI_DATA_DELIVERY_LOWPRI) |
3369 MSI_DATA_VECTOR(cfg->vector);
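/*
 * The resulting message targets the 0xFEExxxxx MSI window: the
 * destination APIC ID sits in address bits 12-19 and the vector in
 * data bits 0-7, which is why the affinity setters below only have
 * to patch those two fields.
 */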
3375 static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3377 struct irq_desc *desc = irq_to_desc(irq);
3378 struct irq_cfg *cfg;
3382 if (set_desc_affinity(desc, mask, &dest))
3385 cfg = get_irq_desc_chip_data(desc);
3387 __get_cached_msi_msg(desc->irq_data.msi_desc, &msg);
3389 msg.data &= ~MSI_DATA_VECTOR_MASK;
3390 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3391 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3392 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3394 __write_msi_msg(desc->irq_data.msi_desc, &msg);
3398 #ifdef CONFIG_INTR_REMAP
3400 * Migrate the MSI irq to another cpumask. This migration is
3401 * done in process context using interrupt-remapping hardware.
3404 ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3406 struct irq_desc *desc = irq_to_desc(irq);
3407 struct irq_cfg *cfg = get_irq_desc_chip_data(desc);
3411 if (get_irte(irq, &irte))
3414 if (set_desc_affinity(desc, mask, &dest))
3417 irte.vector = cfg->vector;
3418 irte.dest_id = IRTE_DEST(dest);
3421 * atomically update the IRTE with the new destination and vector.
3423 modify_irte(irq, &irte);
3426 * After this point, all the interrupts will start arriving
3427 * at the new destination. So, time to clean up the previous
3428 * vector allocation.
3430 if (cfg->move_in_progress)
3431 send_cleanup_vector(cfg);
3437 #endif /* CONFIG_SMP */
3440 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3441 * which implement the MSI or MSI-X Capability Structure.
3443 static struct irq_chip msi_chip = {
3445 .irq_unmask = unmask_msi_irq,
3446 .irq_mask = mask_msi_irq,
3447 .ack = ack_apic_edge,
3449 .set_affinity = set_msi_irq_affinity,
3451 .retrigger = ioapic_retrigger_irq,
3454 static struct irq_chip msi_ir_chip = {
3455 .name = "IR-PCI-MSI",
3456 .irq_unmask = unmask_msi_irq,
3457 .irq_mask = mask_msi_irq,
3458 #ifdef CONFIG_INTR_REMAP
3459 .ack = ir_ack_apic_edge,
3461 .set_affinity = ir_set_msi_irq_affinity,
3464 .retrigger = ioapic_retrigger_irq,
3468 * Map the PCI dev to the corresponding remapping hardware unit
3469 * and allocate 'nvec' consecutive interrupt-remapping table entries
3472 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3474 struct intel_iommu *iommu;
3477 iommu = map_dev_to_ir(dev);
3480 "Unable to map PCI %s to iommu\n", pci_name(dev));
3484 index = alloc_irte(iommu, irq, nvec);
3487 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3494 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3499 ret = msi_compose_msg(dev, irq, &msg, -1);
3503 set_irq_msi(irq, msidesc);
3504 write_msi_msg(irq, &msg);
3506 if (irq_remapped(irq)) {
3507 struct irq_desc *desc = irq_to_desc(irq);
3509 * irq migration in process context
3511 desc->status |= IRQ_MOVE_PCNTXT;
3512 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3514 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3516 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3521 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3524 int ret, sub_handle;
3525 struct msi_desc *msidesc;
3526 unsigned int irq_want;
3527 struct intel_iommu *iommu = NULL;
3531 /* x86 doesn't support multiple MSI yet */
3532 if (type == PCI_CAP_ID_MSI && nvec > 1)
3535 node = dev_to_node(&dev->dev);
3536 irq_want = nr_irqs_gsi;
3538 list_for_each_entry(msidesc, &dev->msi_list, list) {
3539 irq = create_irq_nr(irq_want, node);
3543 if (!intr_remapping_enabled)
3548 * allocate a consecutive block of IRTEs
3551 index = msi_alloc_irte(dev, irq, nvec);
3557 iommu = map_dev_to_ir(dev);
3563 * set up the mapping between the irq and the IRTE
3564 * base index, with the sub_handle pointing to the
3565 * appropriate interrupt-remapping table entry.
3567 set_irte_irq(irq, iommu, index, sub_handle);
3570 ret = setup_msi_irq(dev, msidesc, irq);
3582 void arch_teardown_msi_irq(unsigned int irq)
3587 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3589 static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3591 struct irq_desc *desc = irq_to_desc(irq);
3592 struct irq_cfg *cfg;
3596 if (set_desc_affinity(desc, mask, &dest))
3599 cfg = get_irq_desc_chip_data(desc);
3601 dmar_msi_read(irq, &msg);
3603 msg.data &= ~MSI_DATA_VECTOR_MASK;
3604 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3605 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3606 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3608 dmar_msi_write(irq, &msg);
3613 #endif /* CONFIG_SMP */
3615 static struct irq_chip dmar_msi_type = {
3617 .unmask = dmar_msi_unmask,
3618 .mask = dmar_msi_mask,
3619 .ack = ack_apic_edge,
3621 .set_affinity = dmar_msi_set_affinity,
3623 .retrigger = ioapic_retrigger_irq,
3626 int arch_setup_dmar_msi(unsigned int irq)
3631 ret = msi_compose_msg(NULL, irq, &msg, -1);
3634 dmar_msi_write(irq, &msg);
3635 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3641 #ifdef CONFIG_HPET_TIMER
3644 static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3646 struct irq_desc *desc = irq_to_desc(irq);
3647 struct irq_cfg *cfg;
3651 if (set_desc_affinity(desc, mask, &dest))
3654 cfg = get_irq_desc_chip_data(desc);
3656 hpet_msi_read(irq, &msg);
3658 msg.data &= ~MSI_DATA_VECTOR_MASK;
3659 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3660 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3661 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3663 hpet_msi_write(irq, &msg);
3668 #endif /* CONFIG_SMP */
3670 static struct irq_chip ir_hpet_msi_type = {
3671 .name = "IR-HPET_MSI",
3672 .unmask = hpet_msi_unmask,
3673 .mask = hpet_msi_mask,
3674 #ifdef CONFIG_INTR_REMAP
3675 .ack = ir_ack_apic_edge,
3677 .set_affinity = ir_set_msi_irq_affinity,
3680 .retrigger = ioapic_retrigger_irq,
3683 static struct irq_chip hpet_msi_type = {
3685 .unmask = hpet_msi_unmask,
3686 .mask = hpet_msi_mask,
3687 .ack = ack_apic_edge,
3689 .set_affinity = hpet_msi_set_affinity,
3691 .retrigger = ioapic_retrigger_irq,
3694 int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3698 struct irq_desc *desc = irq_to_desc(irq);
3700 if (intr_remapping_enabled) {
3701 struct intel_iommu *iommu = map_hpet_to_ir(id);
3707 index = alloc_irte(iommu, irq, 1);
3712 ret = msi_compose_msg(NULL, irq, &msg, id);
3716 hpet_msi_write(irq, &msg);
3717 desc->status |= IRQ_MOVE_PCNTXT;
3718 if (irq_remapped(irq))
3719 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
3720 handle_edge_irq, "edge");
3722 set_irq_chip_and_handler_name(irq, &hpet_msi_type,
3723 handle_edge_irq, "edge");
3729 #endif /* CONFIG_PCI_MSI */
3731 * Hypertransport interrupt support
3733 #ifdef CONFIG_HT_IRQ
3737 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3739 struct ht_irq_msg msg;
3740 fetch_ht_irq_msg(irq, &msg);
3742 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3743 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3745 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3746 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3748 write_ht_irq_msg(irq, &msg);
3751 static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3753 struct irq_desc *desc = irq_to_desc(irq);
3754 struct irq_cfg *cfg;
3757 if (set_desc_affinity(desc, mask, &dest))
3760 cfg = get_irq_desc_chip_data(desc);
3762 target_ht_irq(irq, dest, cfg->vector);
3769 static struct irq_chip ht_irq_chip = {
3771 .mask = mask_ht_irq,
3772 .unmask = unmask_ht_irq,
3773 .ack = ack_apic_edge,
3775 .set_affinity = set_ht_irq_affinity,
3777 .retrigger = ioapic_retrigger_irq,
3780 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3782 struct irq_cfg *cfg;
3789 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3791 struct ht_irq_msg msg;
3794 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3795 apic->target_cpus());
3797 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3801 HT_IRQ_LOW_DEST_ID(dest) |
3802 HT_IRQ_LOW_VECTOR(cfg->vector) |
3803 ((apic->irq_dest_mode == 0) ?
3804 HT_IRQ_LOW_DM_PHYSICAL :
3805 HT_IRQ_LOW_DM_LOGICAL) |
3806 HT_IRQ_LOW_RQEOI_EDGE |
3807 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3808 HT_IRQ_LOW_MT_FIXED :
3809 HT_IRQ_LOW_MT_ARBITRATED) |
3810 HT_IRQ_LOW_IRQ_MASKED;
3812 write_ht_irq_msg(irq, &msg);
3814 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3815 handle_edge_irq, "edge");
3817 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3821 #endif /* CONFIG_HT_IRQ */
3823 int __init io_apic_get_redir_entries(int ioapic)
3825 union IO_APIC_reg_01 reg_01;
3826 unsigned long flags;
3828 raw_spin_lock_irqsave(&ioapic_lock, flags);
3829 reg_01.raw = io_apic_read(ioapic, 1);
3830 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3832 /* The register returns the maximum redirection entry index
3833 * supported, which is one less than the total number of redir entries. */
3836 return reg_01.bits.entries + 1;
3839 void __init probe_nr_irqs_gsi(void)
3843 nr = gsi_top + NR_IRQS_LEGACY;
3844 if (nr > nr_irqs_gsi)
3847 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3850 #ifdef CONFIG_SPARSE_IRQ
3851 int __init arch_probe_nr_irqs(void)
3855 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3856 nr_irqs = NR_VECTORS * nr_cpu_ids;
3858 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3859 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3861 * for MSI and HT dyn irq
3863 nr += nr_irqs_gsi * 16;
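/*
 * E.g. a system with 64 GSIs and 8 possible cpus arrives at
 * 64 + 8 * 8 + 64 * 16 = 1152 irqs here, subject to the
 * NR_VECTORS * nr_cpu_ids clamp above.
 */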
3868 return NR_IRQS_LEGACY;
3872 static int __io_apic_set_pci_routing(struct device *dev, int irq,
3873 struct io_apic_irq_attr *irq_attr)
3875 struct irq_desc *desc;
3876 struct irq_cfg *cfg;
3879 int trigger, polarity;
3881 ioapic = irq_attr->ioapic;
3882 if (!IO_APIC_IRQ(irq)) {
3883 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3889 node = dev_to_node(dev);
3891 node = cpu_to_node(0);
3893 desc = irq_to_desc_alloc_node(irq, node);
3895 printk(KERN_INFO "can not get irq_desc %d\n", irq);
3899 pin = irq_attr->ioapic_pin;
3900 trigger = irq_attr->trigger;
3901 polarity = irq_attr->polarity;
3904 * IRQs < 16 are already in the irq_2_pin[] map
3906 if (irq >= legacy_pic->nr_legacy_irqs) {
3907 cfg = get_irq_desc_chip_data(desc);
3908 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3909 printk(KERN_INFO "can not add pin %d for irq %d\n",
3915 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);
3920 int io_apic_set_pci_routing(struct device *dev, int irq,
3921 struct io_apic_irq_attr *irq_attr)
3925 * Avoid pin reprogramming. PRTs typically include entries
3926 * with redundant pin->gsi mappings (but unique PCI devices);
3927 * we only program the IOAPIC on the first.
3929 ioapic = irq_attr->ioapic;
3930 pin = irq_attr->ioapic_pin;
3931 if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
3932 pr_debug("Pin %d-%d already programmed\n",
3933 mp_ioapics[ioapic].apicid, pin);
3936 set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
3938 return __io_apic_set_pci_routing(dev, irq, irq_attr);
3941 u8 __init io_apic_unique_id(u8 id)
3943 #ifdef CONFIG_X86_32
3944 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3945 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3946 return io_apic_get_unique_id(nr_ioapics, id);
3951 DECLARE_BITMAP(used, 256);
3953 bitmap_zero(used, 256);
3954 for (i = 0; i < nr_ioapics; i++) {
3955 struct mpc_ioapic *ia = &mp_ioapics[i];
3956 __set_bit(ia->apicid, used);
3958 if (!test_bit(id, used))
3960 return find_first_zero_bit(used, 256);
3964 #ifdef CONFIG_X86_32
3965 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3967 union IO_APIC_reg_00 reg_00;
3968 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3970 unsigned long flags;
3974 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3975 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3976 * support up to 16 on one shared APIC bus.
3978 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3979 * advantage of new APIC bus architecture.
3982 if (physids_empty(apic_id_map))
3983 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3985 raw_spin_lock_irqsave(&ioapic_lock, flags);
3986 reg_00.raw = io_apic_read(ioapic, 0);
3987 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3989 if (apic_id >= get_physical_broadcast()) {
3990 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3991 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3992 apic_id = reg_00.bits.ID;
3996 * Every APIC in a system must have a unique ID or we get lots of nice
3997 * 'stuck on smp_invalidate_needed IPI wait' messages.
3999 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
4001 for (i = 0; i < get_physical_broadcast(); i++) {
4002 if (!apic->check_apicid_used(&apic_id_map, i))
4006 if (i == get_physical_broadcast())
4007 panic("Max apic_id exceeded!\n");
4009 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
4010 "trying %d\n", ioapic, apic_id, i);
4015 apic->apicid_to_cpu_present(apic_id, &tmp);
4016 physids_or(apic_id_map, apic_id_map, tmp);
4018 if (reg_00.bits.ID != apic_id) {
4019 reg_00.bits.ID = apic_id;
4021 raw_spin_lock_irqsave(&ioapic_lock, flags);
4022 io_apic_write(ioapic, 0, reg_00.raw);
4023 reg_00.raw = io_apic_read(ioapic, 0);
4024 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4027 if (reg_00.bits.ID != apic_id) {
4028 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
4033 apic_printk(APIC_VERBOSE, KERN_INFO
4034 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
4040 int __init io_apic_get_version(int ioapic)
4042 union IO_APIC_reg_01 reg_01;
4043 unsigned long flags;
4045 raw_spin_lock_irqsave(&ioapic_lock, flags);
4046 reg_01.raw = io_apic_read(ioapic, 1);
4047 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4049 return reg_01.bits.version;
4052 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
4054 int ioapic, pin, idx;
4056 if (skip_ioapic_setup)
4059 ioapic = mp_find_ioapic(gsi);
4063 pin = mp_find_ioapic_pin(ioapic, gsi);
4067 idx = find_irq_entry(ioapic, pin, mp_INT);
4071 *trigger = irq_trigger(idx);
4072 *polarity = irq_polarity(idx);
4077 * This function is currently only a helper for the i386 smp boot process, where
4078 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
4079 * so the mask in all cases should simply be apic->target_cpus().
4082 void __init setup_ioapic_dest(void)
4084 int pin, ioapic, irq, irq_entry;
4085 struct irq_desc *desc;
4086 const struct cpumask *mask;
4088 if (skip_ioapic_setup == 1)
4091 for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
4092 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4093 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4094 if (irq_entry == -1)
4096 irq = pin_2_irq(irq_entry, ioapic, pin);
4098 if ((ioapic > 0) && (irq > 16))
4101 desc = irq_to_desc(irq);
4104 * Honour affinities which have been set in early boot
4107 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4108 mask = desc->affinity;
4110 mask = apic->target_cpus();
4112 if (intr_remapping_enabled)
4113 set_ir_ioapic_affinity_irq_desc(desc, mask);
4115 set_ioapic_affinity_irq_desc(desc, mask);
4121 #define IOAPIC_RESOURCE_NAME_SIZE 11
4123 static struct resource *ioapic_resources;
4125 static struct resource * __init ioapic_setup_resources(int nr_ioapics)
4128 struct resource *res;
4132 if (nr_ioapics <= 0)
4135 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
4138 mem = alloc_bootmem(n);
4141 mem += sizeof(struct resource) * nr_ioapics;
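/*
 * The single bootmem block holds all the struct resources first,
 * followed by the IOAPIC_RESOURCE_NAME_SIZE-byte name strings that
 * each res[i].name points into.
 */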
4143 for (i = 0; i < nr_ioapics; i++) {
4145 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
4146 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
4147 mem += IOAPIC_RESOURCE_NAME_SIZE;
4150 ioapic_resources = res;
4155 void __init ioapic_init_mappings(void)
4157 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
4158 struct resource *ioapic_res;
4161 ioapic_res = ioapic_setup_resources(nr_ioapics);
4162 for (i = 0; i < nr_ioapics; i++) {
4163 if (smp_found_config) {
4164 ioapic_phys = mp_ioapics[i].apicaddr;
4165 #ifdef CONFIG_X86_32
4168 "WARNING: bogus zero IO-APIC "
4169 "address found in MPTABLE, "
4170 "disabling IO/APIC support!\n");
4171 smp_found_config = 0;
4172 skip_ioapic_setup = 1;
4173 goto fake_ioapic_page;
4177 #ifdef CONFIG_X86_32
4180 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
4181 ioapic_phys = __pa(ioapic_phys);
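/*
 * Without a usable IO-APIC address we map a freshly allocated
 * scratch page instead, so stray accesses through the fixmap hit
 * harmless RAM rather than faulting.
 */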
4183 set_fixmap_nocache(idx, ioapic_phys);
4184 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
4185 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
4189 ioapic_res->start = ioapic_phys;
4190 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
4195 void __init ioapic_insert_resources(void)
4198 struct resource *r = ioapic_resources;
4203 "IO APIC resources couldn't be allocated.\n");
4207 for (i = 0; i < nr_ioapics; i++) {
4208 insert_resource(&iomem_resource, r);
4213 int mp_find_ioapic(u32 gsi)
4217 /* Find the IOAPIC that manages this GSI. */
4218 for (i = 0; i < nr_ioapics; i++) {
4219 if ((gsi >= mp_gsi_routing[i].gsi_base)
4220 && (gsi <= mp_gsi_routing[i].gsi_end))
4224 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
4228 int mp_find_ioapic_pin(int ioapic, u32 gsi)
4230 if (WARN_ON(ioapic == -1))
4232 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
4235 return gsi - mp_gsi_routing[ioapic].gsi_base;
4238 static int bad_ioapic(unsigned long address)
4240 if (nr_ioapics >= MAX_IO_APICS) {
4241 printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
4242 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
4246 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
4247 " found in table, skipping!\n");
4253 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4258 if (bad_ioapic(address))
4263 mp_ioapics[idx].type = MP_IOAPIC;
4264 mp_ioapics[idx].flags = MPC_APIC_USABLE;
4265 mp_ioapics[idx].apicaddr = address;
4267 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
4268 mp_ioapics[idx].apicid = io_apic_unique_id(id);
4269 mp_ioapics[idx].apicver = io_apic_get_version(idx);
4272 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
4273 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
4275 entries = io_apic_get_redir_entries(idx);
4276 mp_gsi_routing[idx].gsi_base = gsi_base;
4277 mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
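/*
 * E.g. a 24-entry IO-APIC registered with gsi_base 0 covers
 * GSIs 0-23; a second one would then typically start at gsi_base 24.
 */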
4280 * The number of IO-APIC IRQ registers (== #pins):
4282 nr_ioapic_registers[idx] = entries;
4284 if (mp_gsi_routing[idx].gsi_end >= gsi_top)
4285 gsi_top = mp_gsi_routing[idx].gsi_end + 1;
4287 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
4288 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
4289 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
4290 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
4295 /* Enable IOAPIC early just for system timer */
4296 void __init pre_init_apic_IRQ0(void)
4298 struct irq_cfg *cfg;
4299 struct irq_desc *desc;
4301 printk(KERN_INFO "Early APIC setup for system timer0\n");
4303 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
4305 desc = irq_to_desc_alloc_node(0, 0);
4310 add_pin_to_irq_node(cfg, 0, 0, 0);
4311 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
4313 setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);