/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/errno.h>
16 #include <linux/reboot.h>
17 #include <linux/slab.h>
18 #include <linux/stddef.h>
19 #include <linux/sched.h>
20 #include <linux/signal.h>
21 #include <linux/syscore_ops.h>
22 #include <linux/device.h>
23 #include <linux/spinlock.h>
24 #include <linux/fsl_devices.h>
/* The (single) system IPIC; set by ipic_init() and used as the default
 * irq host and by all the primary_ipic accessors below. */
static struct ipic * primary_ipic;
/* Forward declarations: level vs. edge chips differ only in ack handling. */
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
/* Serializes all read-modify-write accesses to the IPIC registers. */
static DEFINE_RAW_SPINLOCK(ipic_lock);
/*
 * Per-source configuration table, indexed by hardware IRQ number.
 * For each source: .mask is the SIMSR half that masks it, .prio the
 * priority register it is grouped under (0 == priority not settable),
 * .force the SIFCR half used to force the interrupt.
 *
 * NOTE(review): this table appears truncated by extraction — the
 * "[n] = {" entry designators, the .ack/.bit/.prio_mask fields and the
 * per-entry closing braces are missing.  Restore from the original
 * file before building; do not hand-edit the values below.
 */
static struct ipic_info ipic_info[] = {
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_D,
	.force = IPIC_SIFCR_H,
	.prio = IPIC_SMPRR_A,
	.prio = IPIC_SMPRR_A,
	.prio = IPIC_SMPRR_A,
	.prio = IPIC_SMPRR_B,
	.prio = IPIC_SMPRR_B,
	.prio = IPIC_SMPRR_B,
	.prio = IPIC_SMPRR_B,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_A,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.mask = IPIC_SIMSR_H,
	.prio = IPIC_SIPRR_B,
	.force = IPIC_SIFCR_H,
	.prio = IPIC_SMPRR_A,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_A,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_A,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_A,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_A,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_B,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_B,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_B,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.prio = IPIC_SMPRR_B,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
	.mask = IPIC_SIMSR_L,
	.force = IPIC_SIFCR_L,
508 static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
510 return in_be32(base + (reg >> 2));
513 static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
515 out_be32(base + (reg >> 2), value);
518 static inline struct ipic * ipic_from_irq(unsigned int virq)
523 static void ipic_unmask_irq(struct irq_data *d)
525 struct ipic *ipic = ipic_from_irq(d->irq);
526 unsigned int src = irqd_to_hwirq(d);
530 raw_spin_lock_irqsave(&ipic_lock, flags);
532 temp = ipic_read(ipic->regs, ipic_info[src].mask);
533 temp |= (1 << (31 - ipic_info[src].bit));
534 ipic_write(ipic->regs, ipic_info[src].mask, temp);
536 raw_spin_unlock_irqrestore(&ipic_lock, flags);
539 static void ipic_mask_irq(struct irq_data *d)
541 struct ipic *ipic = ipic_from_irq(d->irq);
542 unsigned int src = irqd_to_hwirq(d);
546 raw_spin_lock_irqsave(&ipic_lock, flags);
548 temp = ipic_read(ipic->regs, ipic_info[src].mask);
549 temp &= ~(1 << (31 - ipic_info[src].bit));
550 ipic_write(ipic->regs, ipic_info[src].mask, temp);
552 /* mb() can't guarantee that masking is finished. But it does finish
553 * for nearly all cases. */
556 raw_spin_unlock_irqrestore(&ipic_lock, flags);
559 static void ipic_ack_irq(struct irq_data *d)
561 struct ipic *ipic = ipic_from_irq(d->irq);
562 unsigned int src = irqd_to_hwirq(d);
566 raw_spin_lock_irqsave(&ipic_lock, flags);
568 temp = 1 << (31 - ipic_info[src].bit);
569 ipic_write(ipic->regs, ipic_info[src].ack, temp);
571 /* mb() can't guarantee that ack is finished. But it does finish
572 * for nearly all cases. */
575 raw_spin_unlock_irqrestore(&ipic_lock, flags);
578 static void ipic_mask_irq_and_ack(struct irq_data *d)
580 struct ipic *ipic = ipic_from_irq(d->irq);
581 unsigned int src = irqd_to_hwirq(d);
585 raw_spin_lock_irqsave(&ipic_lock, flags);
587 temp = ipic_read(ipic->regs, ipic_info[src].mask);
588 temp &= ~(1 << (31 - ipic_info[src].bit));
589 ipic_write(ipic->regs, ipic_info[src].mask, temp);
591 temp = 1 << (31 - ipic_info[src].bit);
592 ipic_write(ipic->regs, ipic_info[src].ack, temp);
594 /* mb() can't guarantee that ack is finished. But it does finish
595 * for nearly all cases. */
598 raw_spin_unlock_irqrestore(&ipic_lock, flags);
601 static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
603 struct ipic *ipic = ipic_from_irq(d->irq);
604 unsigned int src = irqd_to_hwirq(d);
605 unsigned int vold, vnew, edibit;
607 if (flow_type == IRQ_TYPE_NONE)
608 flow_type = IRQ_TYPE_LEVEL_LOW;
610 /* ipic supports only low assertion and high-to-low change senses
612 if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
613 printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
617 /* ipic supports only edge mode on external interrupts */
618 if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
619 printk(KERN_ERR "ipic: edge sense not supported on internal "
625 irqd_set_trigger_type(d, flow_type);
626 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
627 irq_set_handler_locked(d, handle_level_irq);
628 d->chip = &ipic_level_irq_chip;
630 irq_set_handler_locked(d, handle_edge_irq);
631 d->chip = &ipic_edge_irq_chip;
634 /* only EXT IRQ senses are programmable on ipic
635 * internal IRQ senses are LEVEL_LOW
637 if (src == IPIC_IRQ_EXT0)
640 if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
641 edibit = (14 - (src - IPIC_IRQ_EXT1));
643 return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
645 vold = ipic_read(ipic->regs, IPIC_SECNR);
646 if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
647 vnew = vold | (1 << edibit);
649 vnew = vold & ~(1 << edibit);
652 ipic_write(ipic->regs, IPIC_SECNR, vnew);
653 return IRQ_SET_MASK_OK_NOCOPY;
656 /* level interrupts and edge interrupts have different ack operations */
657 static struct irq_chip ipic_level_irq_chip = {
659 .irq_unmask = ipic_unmask_irq,
660 .irq_mask = ipic_mask_irq,
661 .irq_mask_ack = ipic_mask_irq,
662 .irq_set_type = ipic_set_irq_type,
665 static struct irq_chip ipic_edge_irq_chip = {
667 .irq_unmask = ipic_unmask_irq,
668 .irq_mask = ipic_mask_irq,
669 .irq_mask_ack = ipic_mask_irq_and_ack,
670 .irq_ack = ipic_ack_irq,
671 .irq_set_type = ipic_set_irq_type,
674 static int ipic_host_match(struct irq_domain *h, struct device_node *node,
675 enum irq_domain_bus_token bus_token)
677 /* Exact match, unless ipic node is NULL */
678 struct device_node *of_node = irq_domain_get_of_node(h);
679 return of_node == NULL || of_node == node;
682 static int ipic_host_map(struct irq_domain *h, unsigned int virq,
685 struct ipic *ipic = h->host_data;
687 irq_set_chip_data(virq, ipic);
688 irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
690 /* Set default irq type */
691 irq_set_irq_type(virq, IRQ_TYPE_NONE);
696 static const struct irq_domain_ops ipic_host_ops = {
697 .match = ipic_host_match,
698 .map = ipic_host_map,
699 .xlate = irq_domain_xlate_onetwocell,
702 struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
708 ret = of_address_to_resource(node, 0, &res);
712 ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
716 ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
717 &ipic_host_ops, ipic);
718 if (ipic->irqhost == NULL) {
723 ipic->regs = ioremap(res.start, resource_size(&res));
726 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
728 /* default priority scheme is grouped. If spread mode is required
729 * configure SICFR accordingly */
730 if (flags & IPIC_SPREADMODE_GRP_A)
732 if (flags & IPIC_SPREADMODE_GRP_B)
734 if (flags & IPIC_SPREADMODE_GRP_C)
736 if (flags & IPIC_SPREADMODE_GRP_D)
738 if (flags & IPIC_SPREADMODE_MIX_A)
740 if (flags & IPIC_SPREADMODE_MIX_B)
743 ipic_write(ipic->regs, IPIC_SICFR, temp);
745 /* handle MCP route */
747 if (flags & IPIC_DISABLE_MCP_OUT)
749 ipic_write(ipic->regs, IPIC_SERCR, temp);
751 /* handle routing of IRQ0 to MCP */
752 temp = ipic_read(ipic->regs, IPIC_SEMSR);
754 if (flags & IPIC_IRQ0_MCP)
757 temp &= ~SEMSR_SIRQ0;
759 ipic_write(ipic->regs, IPIC_SEMSR, temp);
762 irq_set_default_host(primary_ipic->irqhost);
764 ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
765 ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
767 printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
773 int ipic_set_priority(unsigned int virq, unsigned int priority)
775 struct ipic *ipic = ipic_from_irq(virq);
776 unsigned int src = virq_to_hw(virq);
783 if (ipic_info[src].prio == 0)
786 temp = ipic_read(ipic->regs, ipic_info[src].prio);
789 temp &= ~(0x7 << (20 + (3 - priority) * 3));
790 temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
792 temp &= ~(0x7 << (4 + (7 - priority) * 3));
793 temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
796 ipic_write(ipic->regs, ipic_info[src].prio, temp);
801 void ipic_set_highest_priority(unsigned int virq)
803 struct ipic *ipic = ipic_from_irq(virq);
804 unsigned int src = virq_to_hw(virq);
807 temp = ipic_read(ipic->regs, IPIC_SICFR);
809 /* clear and set HPI */
811 temp |= (src & 0x7f) << 24;
813 ipic_write(ipic->regs, IPIC_SICFR, temp);
816 void ipic_set_default_priority(void)
818 ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
819 ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
820 ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
821 ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
822 ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
823 ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
826 void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
828 struct ipic *ipic = primary_ipic;
831 temp = ipic_read(ipic->regs, IPIC_SERMR);
832 temp |= (1 << (31 - mcp_irq));
833 ipic_write(ipic->regs, IPIC_SERMR, temp);
836 void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
838 struct ipic *ipic = primary_ipic;
841 temp = ipic_read(ipic->regs, IPIC_SERMR);
842 temp &= (1 << (31 - mcp_irq));
843 ipic_write(ipic->regs, IPIC_SERMR, temp);
846 u32 ipic_get_mcp_status(void)
848 return ipic_read(primary_ipic->regs, IPIC_SERMR);
851 void ipic_clear_mcp_status(u32 mask)
853 ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
856 /* Return an interrupt vector or 0 if no interrupt is pending. */
857 unsigned int ipic_get_irq(void)
861 BUG_ON(primary_ipic == NULL);
863 #define IPIC_SIVCR_VECTOR_MASK 0x7f
864 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
866 if (irq == 0) /* 0 --> no irq is pending */
869 return irq_linear_revmap(primary_ipic->irqhost, irq);
#ifdef CONFIG_SUSPEND
/*
 * Register snapshot taken at suspend time and replayed at resume.
 * NOTE(review): this struct definition was lost in extraction and has
 * been reconstructed from the fields used by ipic_suspend()/
 * ipic_resume() below — confirm against the original file.
 * Only SIPRR A/D are saved, matching what suspend/resume touch.
 */
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

/* syscore suspend hook: snapshot all IPIC state; additionally mask
 * every source when entering deep sleep. Always returns 0. */
static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems coming out of deep sleep.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}

/* syscore resume hook: restore everything ipic_suspend() saved. */
static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
938 static struct syscore_ops ipic_syscore_ops = {
939 .suspend = ipic_suspend,
940 .resume = ipic_resume,
943 static int __init init_ipic_syscore(void)
945 if (!primary_ipic || !primary_ipic->regs)
948 printk(KERN_DEBUG "Registering ipic system core operations\n");
949 register_syscore_ops(&ipic_syscore_ops);
954 subsys_initcall(init_ipic_syscore);