blackfin: mach-common: ints-priority add irq_set_wake
1 /*
2  * Set up the interrupt priorities
3  *
4  * Copyright  2004-2009 Analog Devices Inc.
5  *                 2003 Bas Vermeulen <bas@buyways.nl>
6  *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
7  *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
8  *                 1999 D. Jeff Dionne <jeff@uclinux.org>
9  *                 1996 Roman Zippel
10  *
11  * Licensed under the GPL-2
12  */
13
14 #include <linux/module.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/seq_file.h>
17 #include <linux/irq.h>
18 #include <linux/sched.h>
19 #include <linux/syscore_ops.h>
20 #include <asm/delay.h>
21 #ifdef CONFIG_IPIPE
22 #include <linux/ipipe.h>
23 #endif
24 #include <asm/traps.h>
25 #include <asm/blackfin.h>
26 #include <asm/gpio.h>
27 #include <asm/irq_handler.h>
28 #include <asm/dpmc.h>
29
30 #ifndef CONFIG_BF60x
31 # define SIC_SYSIRQ(irq)        ((irq) - (IRQ_CORETMR + 1))
32 #else
33 # define SIC_SYSIRQ(irq)        ((irq) - IVG15)
34 #endif
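/*
 * SIC_SYSIRQ() maps a Linux IRQ number back to its system interrupt ID:
 * the bit index used in the SIC_IMASK/SIC_ISR/SIC_IWR registers on older
 * parts, or the SEC source ID (SID) on BF60x.  For example, on a non-BF60x
 * part the first peripheral interrupt (IRQ_CORETMR + 1) maps to system
 * interrupt 0.
 */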
35
36 /*
37  * NOTES:
38  * - we have separated the physical hardware interrupts from the
39  *   IRQ levels that the Linux kernel sees (see the description
40  *   in irq.h)
41  */
42
43 #ifndef CONFIG_SMP
44 /* Initialize this to an actual value to force it into the .data
45  * section so that we know it is properly initialized at entry into
46  * the kernel but before bss is initialized to zero (which is where
47  * it would live otherwise).  The 0x1f magic represents the IRQs we
48  * cannot actually mask out in hardware.
49  */
50 unsigned long bfin_irq_flags = 0x1f;
51 EXPORT_SYMBOL(bfin_irq_flags);
52 #endif
53
54 #ifdef CONFIG_PM
55 unsigned long bfin_sic_iwr[3];  /* Up to 3 SIC_IWRx registers */
56 unsigned vr_wakeup;
57 #endif
58
59 #ifndef CONFIG_BF60x
60 static struct ivgx {
61         /* irq number for request_irq, available in mach-bf5xx/irq.h */
62         unsigned int irqno;
63         /* corresponding bit in the SIC_ISR register */
64         unsigned int isrflag;
65 } ivg_table[NR_PERI_INTS];
66
67 static struct ivg_slice {
68         /* position of first irq in ivg_table for given ivg */
69         struct ivgx *ifirst;
70         struct ivgx *istop;
71 } ivg7_13[IVG13 - IVG7 + 1];
72
73
74 /*
75  * Search SIC_IAR and fill tables with the irqvalues
76  * and their positions in the SIC_ISR register.
77  */
78 static void __init search_IAR(void)
79 {
80         unsigned ivg, irq_pos = 0;
81         for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
82                 int irqN;
83
84                 ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
85
86                 for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
87                         int irqn;
88                         u32 iar =
89                                 bfin_read32((unsigned long *)SIC_IAR0 +
90 #if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
91         defined(CONFIG_BF538) || defined(CONFIG_BF539)
92                                 ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
93 #else
94                                 (irqN >> 3)
95 #endif
96                                 );
97                         for (irqn = irqN; irqn < irqN + 4; ++irqn) {
98                                 int iar_shift = (irqn & 7) * 4;
99                                 if (ivg == (0xf & (iar >> iar_shift))) {
100                                         ivg_table[irq_pos].irqno = IVG7 + irqn;
101                                         ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
102                                         ivg7_13[ivg].istop++;
103                                         irq_pos++;
104                                 }
105                         }
106                 }
107         }
108 }
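/*
 * Illustrative example (the actual IAR values are programmed by
 * program_IAR() in init_arch_irq()): if SIC_IAR0 assigns peripheral
 * interrupt 0 the priority value 0x0 (i.e. IVG7), then the ivg == 0 pass
 * of the loop above records
 *	ivg_table[0].irqno   = IVG7 + 0;
 *	ivg_table[0].isrflag = 1 << 0;
 * and ivg7_13[0] ends up spanning every peripheral routed to IVG7, in
 * SIC_ISR bit order.
 */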
109 #endif
110
111 /*
112  * This is for core internal IRQs
113  */
114 void bfin_ack_noop(struct irq_data *d)
115 {
116         /* Dummy function.  */
117 }
118
119 static void bfin_core_mask_irq(struct irq_data *d)
120 {
121         bfin_irq_flags &= ~(1 << d->irq);
122         if (!hard_irqs_disabled())
123                 hard_local_irq_enable();
124 }
125
126 static void bfin_core_unmask_irq(struct irq_data *d)
127 {
128         bfin_irq_flags |= 1 << d->irq;
129         /*
130          * If interrupts are enabled, IMASK must contain the same value
131          * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
132          * are currently disabled we need not do anything; one of the
133          * callers will take care of setting IMASK to the proper value
134          * when reenabling interrupts.
135          * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
136          * what we need.
137          */
138         if (!hard_irqs_disabled())
139                 hard_local_irq_enable();
140         return;
141 }
142
143 void bfin_internal_mask_irq(unsigned int irq)
144 {
145         unsigned long flags = hard_local_irq_save();
146 #ifndef CONFIG_BF60x
147 #ifdef SIC_IMASK0
148         unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
149         unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
150         bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
151                         ~(1 << mask_bit));
152 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
153         bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
154                         ~(1 << mask_bit));
155 # endif
156 #else
157         bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
158                         ~(1 << SIC_SYSIRQ(irq)));
159 #endif /* end of SIC_IMASK0 */
160 #endif
161         hard_local_irq_restore(flags);
162 }
163
164 static void bfin_internal_mask_irq_chip(struct irq_data *d)
165 {
166         bfin_internal_mask_irq(d->irq);
167 }
168
169 #ifdef CONFIG_SMP
170 void bfin_internal_unmask_irq_affinity(unsigned int irq,
171                 const struct cpumask *affinity)
172 #else
173 void bfin_internal_unmask_irq(unsigned int irq)
174 #endif
175 {
176         unsigned long flags = hard_local_irq_save();
177
178 #ifndef CONFIG_BF60x
179 #ifdef SIC_IMASK0
180         unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
181         unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
182 # ifdef CONFIG_SMP
183         if (cpumask_test_cpu(0, affinity))
184 # endif
185                 bfin_write_SIC_IMASK(mask_bank,
186                                 bfin_read_SIC_IMASK(mask_bank) |
187                                 (1 << mask_bit));
188 # ifdef CONFIG_SMP
189         if (cpumask_test_cpu(1, affinity))
190                 bfin_write_SICB_IMASK(mask_bank,
191                                 bfin_read_SICB_IMASK(mask_bank) |
192                                 (1 << mask_bit));
193 # endif
194 #else
195         bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
196                         (1 << SIC_SYSIRQ(irq)));
197 #endif
198 #endif
199         hard_local_irq_restore(flags);
200 }
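/*
 * On parts with multiple SIC_IMASKx registers, the mask bit for a
 * peripheral is located by splitting its system interrupt number into a
 * bank and a bit.  Illustrative example: system interrupt 33 lives in
 * SIC_IMASK1 as bit 1, since
 *	bank = 33 / 32 = 1, bit = 33 % 32 = 1
 */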
201
202 #ifdef CONFIG_BF60x
203 static void bfin_sec_preflow_handler(struct irq_data *d)
204 {
205         unsigned long flags = hard_local_irq_save();
206         unsigned int sid = SIC_SYSIRQ(d->irq);
207
208         bfin_write_SEC_SCI(0, SEC_CSID, sid);
209
210         hard_local_irq_restore(flags);
211 }
212
213 static void bfin_sec_mask_ack_irq(struct irq_data *d)
214 {
215         unsigned long flags = hard_local_irq_save();
216         unsigned int sid = SIC_SYSIRQ(d->irq);
217
218         bfin_write_SEC_SCI(0, SEC_CSID, sid);
219
220         hard_local_irq_restore(flags);
221 }
222
223 static void bfin_sec_unmask_irq(struct irq_data *d)
224 {
225         unsigned long flags = hard_local_irq_save();
226         unsigned int sid = SIC_SYSIRQ(d->irq);
227
228         bfin_write32(SEC_END, sid);
229
230         hard_local_irq_restore(flags);
231 }
232
233 static void bfin_sec_enable_ssi(unsigned int sid)
234 {
235         unsigned long flags = hard_local_irq_save();
236         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
237
238         reg_sctl |= SEC_SCTL_SRC_EN;
239         bfin_write_SEC_SCTL(sid, reg_sctl);
240
241         hard_local_irq_restore(flags);
242 }
243
244 static void bfin_sec_disable_ssi(unsigned int sid)
245 {
246         unsigned long flags = hard_local_irq_save();
247         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
248
249         reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
250         bfin_write_SEC_SCTL(sid, reg_sctl);
251
252         hard_local_irq_restore(flags);
253 }
254
255 static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
256 {
257         unsigned long flags = hard_local_irq_save();
258         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
259
260         reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
261         bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
262
263         hard_local_irq_restore(flags);
264 }
265
266 static void bfin_sec_enable_sci(unsigned int sid)
267 {
268         unsigned long flags = hard_local_irq_save();
269         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
270
271         if (sid == SIC_SYSIRQ(IRQ_WATCH0))
272                 reg_sctl |= SEC_SCTL_FAULT_EN;
273         else
274                 reg_sctl |= SEC_SCTL_INT_EN;
275         bfin_write_SEC_SCTL(sid, reg_sctl);
276
277         hard_local_irq_restore(flags);
278 }
279
280 static void bfin_sec_disable_sci(unsigned int sid)
281 {
282         unsigned long flags = hard_local_irq_save();
283         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
284
285         reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
286         bfin_write_SEC_SCTL(sid, reg_sctl);
287
288         hard_local_irq_restore(flags);
289 }
290
291 static void bfin_sec_enable(struct irq_data *d)
292 {
293         unsigned long flags = hard_local_irq_save();
294         unsigned int sid = SIC_SYSIRQ(d->irq);
295
296         bfin_sec_enable_sci(sid);
297         bfin_sec_enable_ssi(sid);
298
299         hard_local_irq_restore(flags);
300 }
301
302 static void bfin_sec_disable(struct irq_data *d)
303 {
304         unsigned long flags = hard_local_irq_save();
305         unsigned int sid = SIC_SYSIRQ(d->irq);
306
307         bfin_sec_disable_sci(sid);
308         bfin_sec_disable_ssi(sid);
309
310         hard_local_irq_restore(flags);
311 }
312
313 static void bfin_sec_raise_irq(unsigned int sid)
314 {
315         unsigned long flags = hard_local_irq_save();
316
317         bfin_write32(SEC_RAISE, sid);
318
319         hard_local_irq_restore(flags);
320 }
321
322 static void init_software_driven_irq(void)
323 {
324         bfin_sec_set_ssi_coreid(34, 0);
325         bfin_sec_set_ssi_coreid(35, 1);
326         bfin_sec_set_ssi_coreid(36, 0);
327         bfin_sec_set_ssi_coreid(37, 1);
328 }
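/*
 * SEC source IDs 34-37 configured above are the software-driven interrupt
 * sources; they are steered alternately to core 0 and core 1 via the SCTL
 * core-target-group field and are later registered with handle_percpu_irq
 * in init_arch_irq().
 */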
329
330 void bfin_sec_resume(void)
331 {
332         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
333         udelay(100);
334         bfin_write_SEC_GCTL(SEC_GCTL_EN);
335         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
336 }
337
338 void handle_sec_sfi_fault(uint32_t gstat)
339 {
340
341 }
342
343 void handle_sec_sci_fault(uint32_t gstat)
344 {
345         uint32_t core_id;
346         uint32_t cstat;
347
348         core_id = gstat & SEC_GSTAT_SCI;
349         cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
350         if (cstat & SEC_CSTAT_ERR) {
351                 switch (cstat & SEC_CSTAT_ERRC) {
352                 case SEC_CSTAT_ACKERR:
353                         printk(KERN_DEBUG "sec ack err\n");
354                         break;
355                 default:
356                         printk(KERN_DEBUG "sec sci unknown err\n");
357                 }
358         }
359
360 }
361
362 void handle_sec_ssi_fault(uint32_t gstat)
363 {
364         uint32_t sid;
365         uint32_t sstat;
366
367         sid = gstat & SEC_GSTAT_SID;
368         sstat = bfin_read_SEC_SSTAT(sid);
369
370 }
371
372 void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
373 {
374         uint32_t sec_gstat;
375
376         raw_spin_lock(&desc->lock);
377
378         sec_gstat = bfin_read32(SEC_GSTAT);
379         if (sec_gstat & SEC_GSTAT_ERR) {
380
381                 switch (sec_gstat & SEC_GSTAT_ERRC) {
382                 case 0:
383                         handle_sec_sfi_fault(sec_gstat);
384                         break;
385                 case SEC_GSTAT_SCIERR:
386                         handle_sec_sci_fault(sec_gstat);
387                         break;
388                 case SEC_GSTAT_SSIERR:
389                         handle_sec_ssi_fault(sec_gstat);
390                         break;
391                 }
392
393
394         }
395
396         raw_spin_unlock(&desc->lock);
397 }
398
399 static int sec_suspend(void)
400 {
401         return 0;
402 }
403
404 static void sec_resume(void)
405 {
406         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
407         udelay(100);
408         bfin_write_SEC_GCTL(SEC_GCTL_EN);
409         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
410 }
411
412 static struct syscore_ops sec_pm_syscore_ops = {
413         .suspend = sec_suspend,
414         .resume = sec_resume,
415 };
416
417 #endif
418
419 #ifdef CONFIG_SMP
420 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
421 {
422         bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
423 }
424
425 static int bfin_internal_set_affinity(struct irq_data *d,
426                                       const struct cpumask *mask, bool force)
427 {
428         bfin_internal_mask_irq(d->irq);
429         bfin_internal_unmask_irq_affinity(d->irq, mask);
430
431         return 0;
432 }
433 #else
434 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
435 {
436         bfin_internal_unmask_irq(d->irq);
437 }
438 #endif
439
440 #if defined(CONFIG_PM) && !defined(CONFIG_BF60x)
441 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
442 {
443         u32 bank, bit, wakeup = 0;
444         unsigned long flags;
445         bank = SIC_SYSIRQ(irq) / 32;
446         bit = SIC_SYSIRQ(irq) % 32;
447
448         switch (irq) {
449 #ifdef IRQ_RTC
450         case IRQ_RTC:
451         wakeup |= WAKE;
452         break;
453 #endif
454 #ifdef IRQ_CAN0_RX
455         case IRQ_CAN0_RX:
456         wakeup |= CANWE;
457         break;
458 #endif
459 #ifdef IRQ_CAN1_RX
460         case IRQ_CAN1_RX:
461         wakeup |= CANWE;
462         break;
463 #endif
464 #ifdef IRQ_USB_INT0
465         case IRQ_USB_INT0:
466         wakeup |= USBWE;
467         break;
468 #endif
469 #ifdef CONFIG_BF54x
470         case IRQ_CNT:
471         wakeup |= ROTWE;
472         break;
473 #endif
474         default:
475         break;
476         }
477
478         flags = hard_local_irq_save();
479
480         if (state) {
481                 bfin_sic_iwr[bank] |= (1 << bit);
482                 vr_wakeup  |= wakeup;
483
484         } else {
485                 bfin_sic_iwr[bank] &= ~(1 << bit);
486                 vr_wakeup  &= ~wakeup;
487         }
488
489         hard_local_irq_restore(flags);
490
491         return 0;
492 }
493
494 static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
495 {
496         return bfin_internal_set_wake(d->irq, state);
497 }
498 #else
499 inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
500 {
501         return 0;
502 }
503 # define bfin_internal_set_wake_chip NULL
504 #endif
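/*
 * Usage sketch (illustrative, not part of this file): a driver marks its
 * interrupt as a wakeup source with enable_irq_wake(), which the generic
 * IRQ core forwards to the .irq_set_wake callback wired up below, i.e.
 * bfin_internal_set_wake_chip() -> bfin_internal_set_wake().  Assuming the
 * part defines IRQ_RTC, and with a hypothetical handler my_rtc_isr:
 *
 *	ret = request_irq(IRQ_RTC, my_rtc_isr, 0, "rtc", dev);
 *	if (!ret)
 *		enable_irq_wake(IRQ_RTC);  // sets the SIC_IWRx bit and VR wakeup flag
 */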
505
506 static struct irq_chip bfin_core_irqchip = {
507         .name = "CORE",
508         .irq_mask = bfin_core_mask_irq,
509         .irq_unmask = bfin_core_unmask_irq,
510 };
511
512 static struct irq_chip bfin_internal_irqchip = {
513         .name = "INTN",
514         .irq_mask = bfin_internal_mask_irq_chip,
515         .irq_unmask = bfin_internal_unmask_irq_chip,
516         .irq_disable = bfin_internal_mask_irq_chip,
517         .irq_enable = bfin_internal_unmask_irq_chip,
518 #ifdef CONFIG_SMP
519         .irq_set_affinity = bfin_internal_set_affinity,
520 #endif
521         .irq_set_wake = bfin_internal_set_wake_chip,
522 };
523
524 #ifdef CONFIG_BF60x
525 static struct irq_chip bfin_sec_irqchip = {
526         .name = "SEC",
527         .irq_mask_ack = bfin_sec_mask_ack_irq,
528         .irq_mask = bfin_sec_mask_ack_irq,
529         .irq_unmask = bfin_sec_unmask_irq,
530         .irq_eoi = bfin_sec_unmask_irq,
531         .irq_disable = bfin_sec_disable,
532         .irq_enable = bfin_sec_enable,
533         .irq_set_wake = bfin_internal_set_wake,
534 };
535 #endif
536
537 void bfin_handle_irq(unsigned irq)
538 {
539 #ifdef CONFIG_IPIPE
540         struct pt_regs regs;    /* Contents not used. */
541         ipipe_trace_irq_entry(irq);
542         __ipipe_handle_irq(irq, &regs);
543         ipipe_trace_irq_exit(irq);
544 #else /* !CONFIG_IPIPE */
545         generic_handle_irq(irq);
546 #endif  /* !CONFIG_IPIPE */
547 }
548
549 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
550 static int mac_stat_int_mask;
551
552 static void bfin_mac_status_ack_irq(unsigned int irq)
553 {
554         switch (irq) {
555         case IRQ_MAC_MMCINT:
556                 bfin_write_EMAC_MMC_TIRQS(
557                         bfin_read_EMAC_MMC_TIRQE() &
558                         bfin_read_EMAC_MMC_TIRQS());
559                 bfin_write_EMAC_MMC_RIRQS(
560                         bfin_read_EMAC_MMC_RIRQE() &
561                         bfin_read_EMAC_MMC_RIRQS());
562                 break;
563         case IRQ_MAC_RXFSINT:
564                 bfin_write_EMAC_RX_STKY(
565                         bfin_read_EMAC_RX_IRQE() &
566                         bfin_read_EMAC_RX_STKY());
567                 break;
568         case IRQ_MAC_TXFSINT:
569                 bfin_write_EMAC_TX_STKY(
570                         bfin_read_EMAC_TX_IRQE() &
571                         bfin_read_EMAC_TX_STKY());
572                 break;
573         case IRQ_MAC_WAKEDET:
574                  bfin_write_EMAC_WKUP_CTL(
575                         bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
576                 break;
577         default:
578                 /* These bits are W1C */
579                 bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
580                 break;
581         }
582 }
583
584 static void bfin_mac_status_mask_irq(struct irq_data *d)
585 {
586         unsigned int irq = d->irq;
587
588         mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
589 #ifdef BF537_FAMILY
590         switch (irq) {
591         case IRQ_MAC_PHYINT:
592                 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
593                 break;
594         default:
595                 break;
596         }
597 #else
598         if (!mac_stat_int_mask)
599                 bfin_internal_mask_irq(IRQ_MAC_ERROR);
600 #endif
601         bfin_mac_status_ack_irq(irq);
602 }
603
604 static void bfin_mac_status_unmask_irq(struct irq_data *d)
605 {
606         unsigned int irq = d->irq;
607
608 #ifdef BF537_FAMILY
609         switch (irq) {
610         case IRQ_MAC_PHYINT:
611                 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
612                 break;
613         default:
614                 break;
615         }
616 #else
617         if (!mac_stat_int_mask)
618                 bfin_internal_unmask_irq(IRQ_MAC_ERROR);
619 #endif
620         mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
621 }
622
623 #ifdef CONFIG_PM
624 int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
625 {
626 #ifdef BF537_FAMILY
627         return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
628 #else
629         return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
630 #endif
631 }
632 #else
633 # define bfin_mac_status_set_wake NULL
634 #endif
635
636 static struct irq_chip bfin_mac_status_irqchip = {
637         .name = "MACST",
638         .irq_mask = bfin_mac_status_mask_irq,
639         .irq_unmask = bfin_mac_status_unmask_irq,
640         .irq_set_wake = bfin_mac_status_set_wake,
641 };
642
643 void bfin_demux_mac_status_irq(unsigned int int_err_irq,
644                                struct irq_desc *inta_desc)
645 {
646         int i, irq = 0;
647         u32 status = bfin_read_EMAC_SYSTAT();
648
649         for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
650                 if (status & (1L << i)) {
651                         irq = IRQ_MAC_PHYINT + i;
652                         break;
653                 }
654
655         if (irq) {
656                 if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
657                         bfin_handle_irq(irq);
658                 } else {
659                         bfin_mac_status_ack_irq(irq);
660                         pr_debug("IRQ %d:"
661                                         " MASKED MAC ERROR INTERRUPT ASSERTED\n",
662                                         irq);
663                 }
664         } else
665                 printk(KERN_ERR
666                                 "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
667                                 " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
668                                 " (EMAC_SYSTAT=0x%X)\n",
669                                 __func__, __FILE__, __LINE__, status);
670 }
671 #endif
672
673 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
674 {
675 #ifdef CONFIG_IPIPE
676         handle = handle_level_irq;
677 #endif
678         __irq_set_handler_locked(irq, handle);
679 }
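/*
 * Note: when CONFIG_IPIPE is enabled, every demuxed GPIO interrupt is
 * forced to the level-type flow handler here, regardless of the trigger
 * type requested by the caller.
 */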
680
681 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
682 extern void bfin_gpio_irq_prepare(unsigned gpio);
683
684 #if !BFIN_GPIO_PINT
685
686 static void bfin_gpio_ack_irq(struct irq_data *d)
687 {
688         /* AFAIK, when mask_ack is provided, ack_irq only
689          * gets called for edge-sensitive irqs.
690          */
691         set_gpio_data(irq_to_gpio(d->irq), 0);
692 }
693
694 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
695 {
696         unsigned int irq = d->irq;
697         u32 gpionr = irq_to_gpio(irq);
698
699         if (!irqd_is_level_type(d))
700                 set_gpio_data(gpionr, 0);
701
702         set_gpio_maska(gpionr, 0);
703 }
704
705 static void bfin_gpio_mask_irq(struct irq_data *d)
706 {
707         set_gpio_maska(irq_to_gpio(d->irq), 0);
708 }
709
710 static void bfin_gpio_unmask_irq(struct irq_data *d)
711 {
712         set_gpio_maska(irq_to_gpio(d->irq), 1);
713 }
714
715 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
716 {
717         u32 gpionr = irq_to_gpio(d->irq);
718
719         if (__test_and_set_bit(gpionr, gpio_enabled))
720                 bfin_gpio_irq_prepare(gpionr);
721
722         bfin_gpio_unmask_irq(d);
723
724         return 0;
725 }
726
727 static void bfin_gpio_irq_shutdown(struct irq_data *d)
728 {
729         u32 gpionr = irq_to_gpio(d->irq);
730
731         bfin_gpio_mask_irq(d);
732         __clear_bit(gpionr, gpio_enabled);
733         bfin_gpio_irq_free(gpionr);
734 }
735
736 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
737 {
738         unsigned int irq = d->irq;
739         int ret;
740         char buf[16];
741         u32 gpionr = irq_to_gpio(irq);
742
743         if (type == IRQ_TYPE_PROBE) {
744                 /* only probe unenabled GPIO interrupt lines */
745                 if (test_bit(gpionr, gpio_enabled))
746                         return 0;
747                 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
748         }
749
750         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
751                     IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
752
753                 snprintf(buf, 16, "gpio-irq%d", irq);
754                 ret = bfin_gpio_irq_request(gpionr, buf);
755                 if (ret)
756                         return ret;
757
758                 if (__test_and_set_bit(gpionr, gpio_enabled))
759                         bfin_gpio_irq_prepare(gpionr);
760
761         } else {
762                 __clear_bit(gpionr, gpio_enabled);
763                 return 0;
764         }
765
766         set_gpio_inen(gpionr, 0);
767         set_gpio_dir(gpionr, 0);
768
769         if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
770             == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
771                 set_gpio_both(gpionr, 1);
772         else
773                 set_gpio_both(gpionr, 0);
774
775         if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
776                 set_gpio_polar(gpionr, 1);      /* low or falling edge denoted by one */
777         else
778                 set_gpio_polar(gpionr, 0);      /* high or rising edge denoted by zero */
779
780         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
781                 set_gpio_edge(gpionr, 1);
782                 set_gpio_inen(gpionr, 1);
783                 set_gpio_data(gpionr, 0);
784
785         } else {
786                 set_gpio_edge(gpionr, 0);
787                 set_gpio_inen(gpionr, 1);
788         }
789
790         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
791                 bfin_set_irq_handler(irq, handle_edge_irq);
792         else
793                 bfin_set_irq_handler(irq, handle_level_irq);
794
795         return 0;
796 }
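/*
 * Usage sketch (illustrative): a driver requesting a falling-edge GPIO
 * interrupt ends up here via the .irq_set_type callback, e.g. (my_handler
 * and "my-dev" are hypothetical)
 *
 *	request_irq(gpio_to_irq(gpio), my_handler, IRQF_TRIGGER_FALLING,
 *		    "my-dev", dev);
 *
 * which selects handle_edge_irq and programs the polarity/edge/inen bits
 * for that GPIO as above.
 */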
797
798 #ifdef CONFIG_PM
799 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
800 {
801         return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
802 }
803 #else
804 # define bfin_gpio_set_wake NULL
805 #endif
806
807 static void bfin_demux_gpio_block(unsigned int irq)
808 {
809         unsigned int gpio, mask;
810
811         gpio = irq_to_gpio(irq);
812         mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
813
814         while (mask) {
815                 if (mask & 1)
816                         bfin_handle_irq(irq);
817                 irq++;
818                 mask >>= 1;
819         }
820 }
821
822 void bfin_demux_gpio_irq(unsigned int inta_irq,
823                         struct irq_desc *desc)
824 {
825         unsigned int irq;
826
827         switch (inta_irq) {
828 #if defined(BF537_FAMILY)
829         case IRQ_PF_INTA_PG_INTA:
830                 bfin_demux_gpio_block(IRQ_PF0);
831                 irq = IRQ_PG0;
832                 break;
833         case IRQ_PH_INTA_MAC_RX:
834                 irq = IRQ_PH0;
835                 break;
836 #elif defined(BF533_FAMILY)
837         case IRQ_PROG_INTA:
838                 irq = IRQ_PF0;
839                 break;
840 #elif defined(BF538_FAMILY)
841         case IRQ_PORTF_INTA:
842                 irq = IRQ_PF0;
843                 break;
844 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
845         case IRQ_PORTF_INTA:
846                 irq = IRQ_PF0;
847                 break;
848         case IRQ_PORTG_INTA:
849                 irq = IRQ_PG0;
850                 break;
851         case IRQ_PORTH_INTA:
852                 irq = IRQ_PH0;
853                 break;
854 #elif defined(CONFIG_BF561)
855         case IRQ_PROG0_INTA:
856                 irq = IRQ_PF0;
857                 break;
858         case IRQ_PROG1_INTA:
859                 irq = IRQ_PF16;
860                 break;
861         case IRQ_PROG2_INTA:
862                 irq = IRQ_PF32;
863                 break;
864 #endif
865         default:
866                 BUG();
867                 return;
868         }
869
870         bfin_demux_gpio_block(irq);
871 }
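/*
 * bfin_demux_gpio_irq() is installed as a chained handler for the port
 * INTA lines (see init_arch_irq()); it fans a single SIC-level interrupt
 * out to the per-GPIO virtual IRQs by scanning the port's data & maska
 * bits in bfin_demux_gpio_block().
 */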
872
873 #else
874
875 # ifndef CONFIG_BF60x
876 #define NR_PINT_SYS_IRQS        4
877 #define NR_PINTS                160
878 # else
879 #define NR_PINT_SYS_IRQS        6
880 #define NR_PINTS                112
881 #endif
882
883 #define NR_PINT_BITS            32
884 #define IRQ_NOT_AVAIL           0xFF
885
886 #define PINT_2_BANK(x)          ((x) >> 5)
887 #define PINT_2_BIT(x)           ((x) & 0x1F)
888 #define PINT_BIT(x)             (1 << (PINT_2_BIT(x)))
889
890 static unsigned char irq2pint_lut[NR_PINTS];
891 static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
892
893 static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
894         (struct bfin_pint_regs *)PINT0_MASK_SET,
895         (struct bfin_pint_regs *)PINT1_MASK_SET,
896         (struct bfin_pint_regs *)PINT2_MASK_SET,
897         (struct bfin_pint_regs *)PINT3_MASK_SET,
898 #ifdef CONFIG_BF60x
899         (struct bfin_pint_regs *)PINT4_MASK_SET,
900         (struct bfin_pint_regs *)PINT5_MASK_SET,
901 #endif
902 };
903
904 #ifndef CONFIG_BF60x
905 inline unsigned int get_irq_base(u32 bank, u8 bmap)
906 {
907         unsigned int irq_base;
908
909         if (bank < 2) {         /*PA-PB */
910                 irq_base = IRQ_PA0 + bmap * 16;
911         } else {                /*PC-PJ */
912                 irq_base = IRQ_PC0 + bmap * 16;
913         }
914
915         return irq_base;
916 }
917 #else
918 inline unsigned int get_irq_base(u32 bank, u8 bmap)
919 {
920         unsigned int irq_base;
921
922         irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
923
924         return irq_base;
925 }
926 #endif
927
928 /* Whenever PINTx_ASSIGN is altered, init_pint_lut() must be executed! */
929 void init_pint_lut(void)
930 {
931         u16 bank, bit, irq_base, bit_pos;
932         u32 pint_assign;
933         u8 bmap;
934
935         memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
936
937         for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
938
939                 pint_assign = pint[bank]->assign;
940
941                 for (bit = 0; bit < NR_PINT_BITS; bit++) {
942
943                         bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
944
945                         irq_base = get_irq_base(bank, bmap);
946
947                         irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
948                         bit_pos = bit + bank * NR_PINT_BITS;
949
950                         pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
951                         irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
952                 }
953         }
954 }
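/*
 * Illustrative example: if byte 0 of pint[0]->assign is 0x00, bits 0-7 of
 * PINT0 map to port A, so for bit 3:
 *	irq_base = IRQ_PA0 + 3, bit_pos = 3
 *	pint2irq_lut[3] = (IRQ_PA0 + 3) - SYS_IRQS
 *	irq2pint_lut[(IRQ_PA0 + 3) - SYS_IRQS] = 3
 */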
955
956 static void bfin_gpio_ack_irq(struct irq_data *d)
957 {
958         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
959         u32 pintbit = PINT_BIT(pint_val);
960         u32 bank = PINT_2_BANK(pint_val);
961
962         if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
963                 if (pint[bank]->invert_set & pintbit)
964                         pint[bank]->invert_clear = pintbit;
965                 else
966                         pint[bank]->invert_set = pintbit;
967         }
968         pint[bank]->request = pintbit;
969
970 }
971
972 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
973 {
974         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
975         u32 pintbit = PINT_BIT(pint_val);
976         u32 bank = PINT_2_BANK(pint_val);
977
978         if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
979                 if (pint[bank]->invert_set & pintbit)
980                         pint[bank]->invert_clear = pintbit;
981                 else
982                         pint[bank]->invert_set = pintbit;
983         }
984
985         pint[bank]->request = pintbit;
986         pint[bank]->mask_clear = pintbit;
987 }
988
989 static void bfin_gpio_mask_irq(struct irq_data *d)
990 {
991         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
992
993         pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
994 }
995
996 static void bfin_gpio_unmask_irq(struct irq_data *d)
997 {
998         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
999         u32 pintbit = PINT_BIT(pint_val);
1000         u32 bank = PINT_2_BANK(pint_val);
1001
1002         pint[bank]->mask_set = pintbit;
1003 }
1004
1005 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
1006 {
1007         unsigned int irq = d->irq;
1008         u32 gpionr = irq_to_gpio(irq);
1009         u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1010
1011         if (pint_val == IRQ_NOT_AVAIL) {
1012                 printk(KERN_ERR
1013                 "GPIO IRQ %d: not in PINT assign table; "
1014                 "reconfigure the interrupt-to-port assignment\n", irq);
1015                 return -ENODEV;
1016         }
1017
1018         if (__test_and_set_bit(gpionr, gpio_enabled))
1019                 bfin_gpio_irq_prepare(gpionr);
1020
1021         bfin_gpio_unmask_irq(d);
1022
1023         return 0;
1024 }
1025
1026 static void bfin_gpio_irq_shutdown(struct irq_data *d)
1027 {
1028         u32 gpionr = irq_to_gpio(d->irq);
1029
1030         bfin_gpio_mask_irq(d);
1031         __clear_bit(gpionr, gpio_enabled);
1032         bfin_gpio_irq_free(gpionr);
1033 }
1034
1035 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
1036 {
1037         unsigned int irq = d->irq;
1038         int ret;
1039         char buf[16];
1040         u32 gpionr = irq_to_gpio(irq);
1041         u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1042         u32 pintbit = PINT_BIT(pint_val);
1043         u32 bank = PINT_2_BANK(pint_val);
1044
1045         if (pint_val == IRQ_NOT_AVAIL)
1046                 return -ENODEV;
1047
1048         if (type == IRQ_TYPE_PROBE) {
1049                 /* only probe unenabled GPIO interrupt lines */
1050                 if (test_bit(gpionr, gpio_enabled))
1051                         return 0;
1052                 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
1053         }
1054
1055         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
1056                     IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
1057
1058                 snprintf(buf, 16, "gpio-irq%d", irq);
1059                 ret = bfin_gpio_irq_request(gpionr, buf);
1060                 if (ret)
1061                         return ret;
1062
1063                 if (__test_and_set_bit(gpionr, gpio_enabled))
1064                         bfin_gpio_irq_prepare(gpionr);
1065
1066         } else {
1067                 __clear_bit(gpionr, gpio_enabled);
1068                 return 0;
1069         }
1070
1071         if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
1072                 pint[bank]->invert_set = pintbit;       /* low or falling edge denoted by one */
1073         else
1074                 pint[bank]->invert_clear = pintbit;     /* high or rising edge denoted by zero */
1075
1076         if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
1077             == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1078                 if (gpio_get_value(gpionr))
1079                         pint[bank]->invert_set = pintbit;
1080                 else
1081                         pint[bank]->invert_clear = pintbit;
1082         }
1083
1084         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1085                 pint[bank]->edge_set = pintbit;
1086                 bfin_set_irq_handler(irq, handle_edge_irq);
1087         } else {
1088                 pint[bank]->edge_clear = pintbit;
1089                 bfin_set_irq_handler(irq, handle_level_irq);
1090         }
1091
1092         return 0;
1093 }
1094
1095 #ifdef CONFIG_PM
1096 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1097 {
1098         u32 pint_irq;
1099         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1100         u32 bank = PINT_2_BANK(pint_val);
1101
1102         switch (bank) {
1103         case 0:
1104                 pint_irq = IRQ_PINT0;
1105                 break;
1106         case 2:
1107                 pint_irq = IRQ_PINT2;
1108                 break;
1109         case 3:
1110                 pint_irq = IRQ_PINT3;
1111                 break;
1112         case 1:
1113                 pint_irq = IRQ_PINT1;
1114                 break;
1115 #ifdef CONFIG_BF60x
1116         case 4:
1117                 pint_irq = IRQ_PINT4;
1118                 break;
1119         case 5:
1120                 pint_irq = IRQ_PINT5;
1121                 break;
1122 #endif
1123         default:
1124                 return -EINVAL;
1125         }
1126
1127         bfin_internal_set_wake(pint_irq, state);
1128
1129         return 0;
1130 }
1131 #else
1132 # define bfin_gpio_set_wake NULL
1133 #endif
1134
1135 void bfin_demux_gpio_irq(unsigned int inta_irq,
1136                         struct irq_desc *desc)
1137 {
1138         u32 bank, pint_val;
1139         u32 request, irq;
1140         u32 level_mask;
1141         int umask = 0;
1142         struct irq_chip *chip = irq_desc_get_chip(desc);
1143
1144         if (chip->irq_mask_ack) {
1145                 chip->irq_mask_ack(&desc->irq_data);
1146         } else {
1147                 chip->irq_mask(&desc->irq_data);
1148                 if (chip->irq_ack)
1149                         chip->irq_ack(&desc->irq_data);
1150         }
1151
1152         switch (inta_irq) {
1153         case IRQ_PINT0:
1154                 bank = 0;
1155                 break;
1156         case IRQ_PINT2:
1157                 bank = 2;
1158                 break;
1159         case IRQ_PINT3:
1160                 bank = 3;
1161                 break;
1162         case IRQ_PINT1:
1163                 bank = 1;
1164                 break;
1165 #ifdef CONFIG_BF60x
1166         case IRQ_PINT4:
1167                 bank = 4;
1168                 break;
1169         case IRQ_PINT5:
1170                 bank = 5;
1171                 break;
1172 #endif
1173         default:
1174                 return;
1175         }
1176
1177         pint_val = bank * NR_PINT_BITS;
1178
1179         request = pint[bank]->request;
1180
1181         level_mask = pint[bank]->edge_set & request;
1182
1183         while (request) {
1184                 if (request & 1) {
1185                         irq = pint2irq_lut[pint_val] + SYS_IRQS;
1186                         if (level_mask & PINT_BIT(pint_val)) {
1187                                 umask = 1;
1188                                 chip->irq_unmask(&desc->irq_data);
1189                         }
1190                         bfin_handle_irq(irq);
1191                 }
1192                 pint_val++;
1193                 request >>= 1;
1194         }
1195
1196         if (!umask)
1197                 chip->irq_unmask(&desc->irq_data);
1198 }
1199 #endif
1200
1201 static struct irq_chip bfin_gpio_irqchip = {
1202         .name = "GPIO",
1203         .irq_ack = bfin_gpio_ack_irq,
1204         .irq_mask = bfin_gpio_mask_irq,
1205         .irq_mask_ack = bfin_gpio_mask_ack_irq,
1206         .irq_unmask = bfin_gpio_unmask_irq,
1207         .irq_disable = bfin_gpio_mask_irq,
1208         .irq_enable = bfin_gpio_unmask_irq,
1209         .irq_set_type = bfin_gpio_irq_type,
1210         .irq_startup = bfin_gpio_irq_startup,
1211         .irq_shutdown = bfin_gpio_irq_shutdown,
1212         .irq_set_wake = bfin_gpio_set_wake,
1213 };
1214
1215 void __cpuinit init_exception_vectors(void)
1216 {
1217         /* cannot program in software:
1218          * evt0 - emulation (jtag)
1219          * evt1 - reset
1220          */
1221         bfin_write_EVT2(evt_nmi);
1222         bfin_write_EVT3(trap);
1223         bfin_write_EVT5(evt_ivhw);
1224         bfin_write_EVT6(evt_timer);
1225         bfin_write_EVT7(evt_evt7);
1226         bfin_write_EVT8(evt_evt8);
1227         bfin_write_EVT9(evt_evt9);
1228         bfin_write_EVT10(evt_evt10);
1229         bfin_write_EVT11(evt_evt11);
1230         bfin_write_EVT12(evt_evt12);
1231         bfin_write_EVT13(evt_evt13);
1232         bfin_write_EVT14(evt_evt14);
1233         bfin_write_EVT15(evt_system_call);
1234         CSYNC();
1235 }
1236
1237 /*
1238  * This function should be called during kernel startup to initialize
1239  * the BFin IRQ handling routines.
1240  */
1241
1242 int __init init_arch_irq(void)
1243 {
1244         int irq;
1245         unsigned long ilat = 0;
1246
1247 #ifndef CONFIG_BF60x
1248         /* Disable all the peripheral interrupts - page 4-29 HW Ref manual */
1249 #ifdef SIC_IMASK0
1250         bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1251         bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1252 # ifdef SIC_IMASK2
1253         bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1254 # endif
1255 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1256         bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
1257         bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
1258 # endif
1259 #else
1260         bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
1261 #endif
1262 #else /* CONFIG_BF60x */
1263         bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1264 #endif
1265
1266         local_irq_disable();
1267
1268 #if BFIN_GPIO_PINT
1269 # ifdef CONFIG_PINTx_REASSIGN
1270         pint[0]->assign = CONFIG_PINT0_ASSIGN;
1271         pint[1]->assign = CONFIG_PINT1_ASSIGN;
1272         pint[2]->assign = CONFIG_PINT2_ASSIGN;
1273         pint[3]->assign = CONFIG_PINT3_ASSIGN;
1274 # ifdef CONFIG_BF60x
1275         pint[4]->assign = CONFIG_PINT4_ASSIGN;
1276         pint[5]->assign = CONFIG_PINT5_ASSIGN;
1277 # endif
1278 # endif
1279         /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1280         init_pint_lut();
1281 #endif
1282
1283         for (irq = 0; irq <= SYS_IRQS; irq++) {
1284                 if (irq <= IRQ_CORETMR)
1285                         irq_set_chip(irq, &bfin_core_irqchip);
1286                 else
1287                         irq_set_chip(irq, &bfin_internal_irqchip);
1288
1289                 switch (irq) {
1290 #ifndef CONFIG_BF60x
1291 #if BFIN_GPIO_PINT
1292                 case IRQ_PINT0:
1293                 case IRQ_PINT1:
1294                 case IRQ_PINT2:
1295                 case IRQ_PINT3:
1296 #elif defined(BF537_FAMILY)
1297                 case IRQ_PH_INTA_MAC_RX:
1298                 case IRQ_PF_INTA_PG_INTA:
1299 #elif defined(BF533_FAMILY)
1300                 case IRQ_PROG_INTA:
1301 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1302                 case IRQ_PORTF_INTA:
1303                 case IRQ_PORTG_INTA:
1304                 case IRQ_PORTH_INTA:
1305 #elif defined(CONFIG_BF561)
1306                 case IRQ_PROG0_INTA:
1307                 case IRQ_PROG1_INTA:
1308                 case IRQ_PROG2_INTA:
1309 #elif defined(BF538_FAMILY)
1310                 case IRQ_PORTF_INTA:
1311 #endif
1312                         irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1313                         break;
1314 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1315                 case IRQ_MAC_ERROR:
1316                         irq_set_chained_handler(irq,
1317                                                 bfin_demux_mac_status_irq);
1318                         break;
1319 #endif
1320 #if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1321                 case IRQ_SUPPLE_0:
1322                 case IRQ_SUPPLE_1:
1323                         irq_set_handler(irq, handle_percpu_irq);
1324                         break;
1325 #endif
1326 #endif
1327
1328 #ifdef CONFIG_TICKSOURCE_CORETMR
1329                 case IRQ_CORETMR:
1330 # ifdef CONFIG_SMP
1331                         irq_set_handler(irq, handle_percpu_irq);
1332 # else
1333                         irq_set_handler(irq, handle_simple_irq);
1334 # endif
1335                         break;
1336 #endif
1337
1338 #ifdef CONFIG_TICKSOURCE_GPTMR0
1339                 case IRQ_TIMER0:
1340                         irq_set_handler(irq, handle_simple_irq);
1341                         break;
1342 #endif
1343
1344                 default:
1345 #ifdef CONFIG_IPIPE
1346                         irq_set_handler(irq, handle_level_irq);
1347 #else
1348                         irq_set_handler(irq, handle_simple_irq);
1349 #endif
1350                         break;
1351                 }
1352         }
1353
1354         init_mach_irq();
1355
1356 #ifndef CONFIG_BF60x
1357 #if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) && !defined(CONFIG_BF60x)
1358         for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1359                 irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1360                                          handle_level_irq);
1361 #endif
1362         /* if configured as edge-triggered, the handler will be changed to handle_edge_irq */
1363         for (irq = GPIO_IRQ_BASE;
1364                 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1365                 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1366                                          handle_level_irq);
1367 #else
1368         for (irq = BFIN_IRQ(0); irq <= SYS_IRQS; irq++) {
1369                 if (irq < CORE_IRQS && irq != IRQ_CGU_EVT) {
1370                         irq_set_chip(irq, &bfin_sec_irqchip);
1371                         __irq_set_handler(irq, handle_sec_fault, 0, NULL);
1372                 } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
1373                         irq_set_chip(irq, &bfin_sec_irqchip);
1374                         irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1375                 } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1376                         irq_set_chip(irq, &bfin_sec_irqchip);
1377                         irq_set_handler(irq, handle_percpu_irq);
1378                 } else {
1379                         irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1380                                         handle_fasteoi_irq);
1381                         __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1382                 }
1383         }
1384         for (irq = GPIO_IRQ_BASE;
1385                 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1386                 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1387                                         handle_level_irq);
1388 #endif
1389         bfin_write_IMASK(0);
1390         CSYNC();
1391         ilat = bfin_read_ILAT();
1392         CSYNC();
1393         bfin_write_ILAT(ilat);
1394         CSYNC();
1395
1396         printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1397         /* Writing IMASK=xxx is equivalent to "STI xxx", or to setting
1398          * bfin_irq_flags = xxx and then calling local_irq_enable().
1399          */
1400 #ifndef CONFIG_BF60x
1401         program_IAR();
1402         /* It's better to set up the IARs before interrupts are enabled */
1403         search_IAR();
1404
1405         /* Enable interrupts IVG7-15 */
1406         bfin_irq_flags |= IMASK_IVG15 |
1407                 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1408                 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1409
1410         bfin_sti(bfin_irq_flags);
1411
1412         /* This implicitly covers ANOMALY_05000171
1413          * Boot-ROM code modifies SICA_IWRx wakeup registers
1414          */
1415 #ifdef SIC_IWR0
1416         bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1417 # ifdef SIC_IWR1
1418         /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1419          * will screw up the bootrom as it relies on MDMA0/1 waking it
1420          * up from IDLE instructions.  See this report for more info:
1421          * http://blackfin.uclinux.org/gf/tracker/4323
1422          */
1423         if (ANOMALY_05000435)
1424                 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1425         else
1426                 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1427 # endif
1428 # ifdef SIC_IWR2
1429         bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1430 # endif
1431 #else
1432         bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1433 #endif
1434 #else  /* CONFIG_BF60x */
1435         /* Enable interrupts IVG7-15 */
1436         bfin_irq_flags |= IMASK_IVG15 |
1437             IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1438             IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1439
1440
1441         bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1442         bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
1443         bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
1444         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1445         udelay(100);
1446         bfin_write_SEC_GCTL(SEC_GCTL_EN);
1447         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1448         init_software_driven_irq();
1449         register_syscore_ops(&sec_pm_syscore_ops);
1450 #endif
1451         return 0;
1452 }
1453
1454 #ifdef CONFIG_DO_IRQ_L1
1455 __attribute__((l1_text))
1456 #endif
1457 static int vec_to_irq(int vec)
1458 {
1459 #ifndef CONFIG_BF60x
1460         struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1461         struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1462         unsigned long sic_status[3];
1463 #endif
1464         if (likely(vec == EVT_IVTMR_P))
1465                 return IRQ_CORETMR;
1466 #ifndef CONFIG_BF60x
1467 #ifdef SIC_ISR
1468         sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1469 #else
1470         if (smp_processor_id()) {
1471 # ifdef SICB_ISR0
1472                 /* This will be optimized out in UP mode. */
1473                 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1474                 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1475 # endif
1476         } else {
1477                 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1478                 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1479         }
1480 #endif
1481 #ifdef SIC_ISR2
1482         sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1483 #endif
1484
1485         for (;; ivg++) {
1486                 if (ivg >= ivg_stop)
1487                         return -1;
1488 #ifdef SIC_ISR
1489                 if (sic_status[0] & ivg->isrflag)
1490 #else
1491                 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1492 #endif
1493                         return ivg->irqno;
1494         }
1495 #else
1496         /* For BF60x, read the pending source ID directly from the SEC */
1497         return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1498 #endif  /* end of CONFIG_BF60x */
1499 }
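/*
 * vec_to_irq() turns a core event vector (IVG7..IVG15) back into a Linux
 * IRQ number: the core timer is special-cased, otherwise the pending
 * SIC_ISR bits (or, on BF60x, the SEC's current source ID) select the
 * peripheral within that priority level; -1 means nothing is pending.
 */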
1500
1501 #ifdef CONFIG_DO_IRQ_L1
1502 __attribute__((l1_text))
1503 #endif
1504 void do_irq(int vec, struct pt_regs *fp)
1505 {
1506         int irq = vec_to_irq(vec);
1507         if (irq == -1)
1508                 return;
1509         asm_do_IRQ(irq, fp);
1510 }
1511
1512 #ifdef CONFIG_IPIPE
1513
1514 int __ipipe_get_irq_priority(unsigned irq)
1515 {
1516         int ient, prio;
1517
1518         if (irq <= IRQ_CORETMR)
1519                 return irq;
1520
1521         for (ient = 0; ient < NR_PERI_INTS; ient++) {
1522                 struct ivgx *ivg = ivg_table + ient;
1523                 if (ivg->irqno == irq) {
1524                         for (prio = 0; prio <= IVG13-IVG7; prio++) {
1525                                 if (ivg7_13[prio].ifirst <= ivg &&
1526                                     ivg7_13[prio].istop > ivg)
1527                                         return IVG7 + prio;
1528                         }
1529                 }
1530         }
1531
1532         return IVG15;
1533 }
1534
1535 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1536 #ifdef CONFIG_DO_IRQ_L1
1537 __attribute__((l1_text))
1538 #endif
1539 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1540 {
1541         struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1542         struct ipipe_domain *this_domain = __ipipe_current_domain;
1543         struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1544         struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1545         int irq, s = 0;
1546
1547         irq = vec_to_irq(vec);
1548         if (irq == -1)
1549                 return 0;
1550
1551         if (irq == IRQ_SYSTMR) {
1552 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1553                 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1554 #endif
1555                 /* This is basically what we need from the register frame. */
1556                 __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1557                 __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1558                 if (this_domain != ipipe_root_domain)
1559                         __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1560                 else
1561                         __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1562         }
1563
1564         /*
1565          * We don't want Linux interrupt handlers to run at the
1566          * current core priority level (i.e. < EVT15), since this
1567          * might delay other interrupts handled by a high priority
1568          * domain. Here is what we do instead:
1569          *
1570          * - we raise the SYNCDEFER bit to prevent
1571          * __ipipe_handle_irq() from syncing the pipeline for the root
1572          * stage for the incoming interrupt. Upon return, that IRQ is
1573          * pending in the interrupt log.
1574          *
1575          * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1576          * that _schedule_and_signal_from_int will eventually sync the
1577          * pipeline from EVT15.
1578          */
1579         if (this_domain == ipipe_root_domain) {
1580                 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1581                 barrier();
1582         }
1583
1584         ipipe_trace_irq_entry(irq);
1585         __ipipe_handle_irq(irq, regs);
1586         ipipe_trace_irq_exit(irq);
1587
1588         if (user_mode(regs) &&
1589             !ipipe_test_foreign_stack() &&
1590             (current->ipipe_flags & PF_EVTRET) != 0) {
1591                 /*
1592                  * Testing for user_regs() does NOT fully eliminate
1593                  * foreign stack contexts, because of the forged
1594                  * interrupt returns we do through
1595                  * __ipipe_call_irqtail. In that case, we might have
1596                  * preempted a foreign stack context in a high
1597                  * priority domain, with a single interrupt level now
1598                  * pending after the irqtail unwinding is done. In
1599                  * which case user_mode() is now true, and the event
1600                  * gets dispatched spuriously.
1601                  */
1602                 current->ipipe_flags &= ~PF_EVTRET;
1603                 __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1604         }
1605
1606         if (this_domain == ipipe_root_domain) {
1607                 set_thread_flag(TIF_IRQ_SYNC);
1608                 if (!s) {
1609                         __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1610                         return !test_bit(IPIPE_STALL_FLAG, &p->status);
1611                 }
1612         }
1613
1614         return 0;
1615 }
1616
1617 #endif /* CONFIG_IPIPE */