/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
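
/*
 * Note: dispatch_internal, internal_irq_mask and internal_irq_unmask
 * are function pointers bound in bcm63xx_init_irq() to the 32- or
 * 64-bit variants generated by BUILD_IPIC_INTERNAL() below, matching
 * the number of internal IRQ lines on the detected SoC.
 */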
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}
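
/*
 * Note: on cascaded chips the external IRQ lines show up as bits of
 * the internal controller; they are remapped here into the
 * IRQ_EXTERNAL_BASE range instead of being delivered as internal
 * IRQs.
 */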
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
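
/*
 * Note: an explicit mask (m) takes precedence over the affinity
 * recorded in the irq_data; without CONFIG_SMP any online CPU is
 * considered eligible.
 */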
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relative to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
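
/*
 * A worked example of the mask register indexing above, assuming the
 * word layout implied by the reverse-order read in the dispatcher:
 * with width == 64, internal IRQ 40 gives reg = (40 / 32) ^ 1 = 0 and
 * bit = 40 & 0x1f = 8, i.e. IRQs 32-63 live in the first (lowest
 * address) word and IRQs 0-31 in the second. With width == 32 the
 * XOR is a no-op and reg is always 0.
 */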

asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;
		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
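
/*
 * Note: IP2 always carries the first internal stat/mask set (CPU 0).
 * When the external IRQs are cascaded, IP3 carries the second set;
 * otherwise IP3-IP6 are the four external IRQ lines routed directly
 * to the MIPS core as IRQ_EXT_0..IRQ_EXT_3.
 */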

/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}

/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}
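
/*
 * Note: this routine is wired up as .irq_ack below; setting the
 * CLEAR bit presumably resets the latched edge detector so a stale
 * edge is not redelivered after unmasking.
 */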

static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;
	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;
	default:
		pr_err("bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;
	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
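
/*
 * Summary of the trigger encoding programmed above:
 *
 *   flow type      levelsense  sense  bothedge
 *   EDGE_BOTH           0        0       1
 *   EDGE_RISING         0        1       0
 *   EDGE_FALLING        0        0       0
 *   LEVEL_HIGH          1        1       0
 *   LEVEL_LOW           1        0       0
 */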

#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
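
/*
 * Note: the cascade actions deliberately use no_action; they only
 * claim the MIPS CPU IRQ lines so nothing else grabs them, while the
 * actual demultiplexing is done in plat_irq_dispatch().
 */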

static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}
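
	/*
	 * Note: a non-zero irq_stat_addr[1]/irq_mask_addr[1] marks
	 * chips whose second CPU has its own stat/mask register pair,
	 * while irq_bits selects between the 32- and 64-bit handlers
	 * generated by BUILD_IPIC_INTERNAL() above.
	 */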
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i,
				  &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}