/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 *   As such, the enable set/clear, pending set/clear and active bit
 *   registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};
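/*
 * Per-GIC state.  The saved_spi_* arrays below are sized for the
 * architectural maximum of 1020 interrupt IDs: each 32-bit distributor
 * register holds 32 enable bits, 16 two-bit configuration fields or
 * 4 eight-bit target fields, hence the DIV_ROUND_UP divisors.
 */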
struct gic_chip_data {
	unsigned int irq_offset;
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain domain;
#endif
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d,f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
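/*
 * Register layout used by gic_set_type() below: GIC_DIST_CONFIG packs a
 * 2-bit configuration field per interrupt (16 per word, bit 1 selects
 * edge-triggered) and GIC_DIST_ENABLE_SET/CLEAR pack one bit per
 * interrupt (32 per word).  For SPI 42, for example, the enable bit is
 * bit 10 of the second enable word and the edge/level bit is bit 21 of
 * the third configuration word.
 */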
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	return -ENXIO;
}
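/*
 * GIC_DIST_TARGET holds one target byte per interrupt (4 per word), so
 * gic_set_affinity() below addresses the word containing gic_irq(d) and
 * shifts the CPU bit into the right byte lane; for interrupt 42, for
 * example, that is the word at offset 40 with a shift of 16.
 */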
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu_logical_map(cpu) + shift);

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
#else
#define gic_set_wake	NULL
#endif
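/*
 * GIC_CPU_INTACK returns the interrupt ID in bits [9:0]; for SGIs,
 * bits [12:10] additionally carry the ID of the requesting CPU, which is
 * why gic_handle_irq() masks the status with ~0x1c00 but writes the
 * unmodified value back to GIC_CPU_EOI.  IDs 1021-1023 are reserved or
 * spurious, so the handler loops until one of those is returned.
 */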
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
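/*
 * gic_dist_init() routes every SPI to the CPU that initializes the
 * distributor: the CPU's target bit is replicated into all four byte
 * lanes of cpumask, so a single 32-bit write programs the target byte
 * of four interrupts at a time.
 */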
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i, irq;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	struct irq_domain *domain = &gic->domain;
	void __iomem *base = gic_data_dist_base(gic);
	u32 cpu = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	irq_domain_for_each_irq(domain, i, irq) {
		if (i < 32) {
			irq_set_percpu_devid(irq);
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_percpu_devid_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
		} else {
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_fasteoi_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		irq_set_chip_data(irq, gic);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
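/*
 * gic_cpu_init() gives PPIs and SGIs the same default priority (0xa0) as
 * the SPIs above and sets the CPU interface priority mask to 0xf0; since
 * lower values mean higher priority, 0xa0 interrupts pass the mask and
 * are forwarded to the CPU.
 */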
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
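/*
 * The GIC device-tree binding uses a three-cell interrupt specifier:
 * cell 0 selects the interrupt space (0 for SPI, 1 for PPI), cell 1 is
 * the interrupt number within that space and cell 2 carries the trigger
 * flags.  The translation below converts this into a GIC hardware IRQ
 * ID, where SGIs occupy 0-15, PPIs 16-31 and SPIs start at 32.
 */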
#ifdef CONFIG_OF
static int gic_irq_domain_dt_translate(struct irq_domain *d,
				       struct device_node *controller,
				       const u32 *intspec, unsigned int intsize,
				       unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
#endif
const struct irq_domain_ops gic_irq_domain_ops = {
#ifdef CONFIG_OF
	.dt_translate = gic_irq_domain_dt_translate,
#endif
};
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset)
{
	struct gic_chip_data *gic;
	struct irq_domain *domain;
	int gic_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	domain = &gic->domain;
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankein-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	domain->hwirq_base = 32;
	if (gic_nr == 0) {
		if ((irq_start & 31) > 0) {
			domain->hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		}
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	domain->nr_irq = gic_irqs - domain->hwirq_base;
	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
					   numa_node_id());
	if (IS_ERR_VALUE(domain->irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		domain->irq_base = irq_start;
	}
	domain->priv = gic;
	domain->ops = &gic_irq_domain_ops;
	irq_domain_add(domain);

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
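/*
 * GIC_DIST_SOFTINT (GICD_SGIR) takes the CPU target list in bits [23:16]
 * and the SGI number in bits [3:0], hence the "map << 16 | irq" value
 * written by gic_raise_softirq() below.
 */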
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
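/*
 * Device-tree probing: the first "reg" entry maps the distributor and
 * the second the CPU interface; an optional "cpu-offset" property gives
 * the per-CPU stride for implementations whose banked registers are
 * exposed at per-CPU aliases (see CONFIG_GIC_NON_BANKED above).
 */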
#ifdef CONFIG_OF
static int gic_cnt __initdata = 0;

int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;
	struct irq_domain *domain = &gic_data[gic_cnt].domain;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	domain->of_node = of_node_get(node);

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
#endif