2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H
9 #include <linux/errno.h>
10 #include <linux/compiler.h>
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
17 * PACA flags in paca->irq_happened.
19 * These bits are set when interrupts occur while soft-disabled
20 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
21 * is set whenever we manually hard disable.
/* Marker: we hard-disabled (MSR[EE] cleared) in addition to soft-disabling. */
23 #define PACA_IRQ_HARD_DIS 0x01
/* Pending-replay bits, per the replay scheme described in the comment above. */
24 #define PACA_IRQ_DBELL 0x02
25 #define PACA_IRQ_EE 0x04
26 #define PACA_IRQ_DEC 0x08 /* Or FIT */
27 #define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
/* NOTE(review): the matching #ifdef CONFIG_PPC64 for this #endif is not
 * visible in this chunk — it appears to have been lost in extraction. */
29 #endif /* CONFIG_PPC64 */
/* Re-issue a soft-pending interrupt vector — presumably part of the replay
 * scheme described above; implementation not visible here, TODO confirm. */
33 extern void __replay_interrupt(unsigned int vector);
/* Exception handlers defined elsewhere in the arch code. */
35 extern void timer_interrupt(struct pt_regs *);
36 extern void performance_monitor_exception(struct pt_regs *regs);
37 extern void WatchdogException(struct pt_regs *regs);
38 extern void unknown_exception(struct pt_regs *regs);
/*
 * Return the current soft-interrupt state as a flags value, read from
 * paca_struct.soft_enabled (per the offsetof constraint below).
 * NOTE(review): the asm statement between the prototype and this input
 * constraint (source lines 44-49) is missing from this chunk — do not
 * restructure until the full function body is visible.
 */
43 static inline unsigned long arch_local_save_flags(void)
50 : "i" (offsetof(struct paca_struct, soft_enabled)));
/*
 * Soft-disable interrupts: load the old paca->soft_enabled byte and store
 * zero over it, returning the previous value for a later restore.
 * NOTE(review): the asm statement's opening/closing lines and the return
 * statement (source lines 56, 58-59, 63-67) are missing from this chunk.
 */
55 static inline unsigned long arch_local_irq_disable(void)
57 unsigned long flags, zero;
/* li: materialize 0; lbz: old soft_enabled -> flags; stb: write the 0.
 * (13) is the base register r13 — presumably the paca pointer; confirm. */
60 "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
61 : "=r" (flags), "=&r" (zero)
62 : "i" (offsetof(struct paca_struct, soft_enabled))
/* Restore a previously-saved soft-interrupt state; defined out of line. */
68 extern void arch_local_irq_restore(unsigned long);
/* Soft-enable: restore(1) marks interrupts enabled (soft_enabled is stored
 * as 0 = disabled, nonzero = enabled, per the disable path above). */
70 static inline void arch_local_irq_enable(void)
72 arch_local_irq_restore(1);
/* On 64-bit, save == disable: returns the prior soft_enabled state. */
75 static inline unsigned long arch_local_irq_save(void)
77 return arch_local_irq_disable();
/* True when the given flags value represents the soft-disabled state.
 * NOTE(review): the body (source lines 81-84) is missing from this chunk. */
80 static inline bool arch_irqs_disabled_flags(unsigned long flags)
/* Are interrupts (soft-)disabled right now? */
85 static inline bool arch_irqs_disabled(void)
87 return arch_irqs_disabled_flags(arch_local_save_flags());
90 #ifdef CONFIG_PPC_BOOK3E
/* Book3E: toggle MSR[EE] directly with the wrteei instruction. */
91 #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
92 #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
/* NOTE(review): the #else separating the Book3E and non-Book3E variants
 * (source line 93) is missing from this chunk.
 * Non-Book3E: rewrite MSR from the cached kernel_msr, with/without EE. */
94 #define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
95 #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
/*
 * Hard-disable: clear MSR[EE], mark the soft state disabled, and record
 * PACA_IRQ_HARD_DIS so the lazy-replay machinery knows EE was really off.
 * _was_enabled captures the prior soft state — presumably used to guard
 * the trace_hardirqs_off() call; the intervening lines (source 103, 105-106,
 * including the closing "} while(0)") are missing from this chunk.
 */
98 #define hard_irq_disable() do { \
99 u8 _was_enabled = get_paca()->soft_enabled; \
100 __hard_irq_disable(); \
101 get_paca()->soft_enabled = 0; \
102 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
104 trace_hardirqs_off(); \
/* True if any interrupt other than the hard-disable marker is pending replay. */
107 static inline bool lazy_irq_pending(void)
109 return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
113 * This is called by asynchronous interrupts to conditionally
114 * re-enable hard interrupts when soft-disabled after having
115 * cleared the source of the interrupt
117 static inline void may_hard_irq_enable(void)
/* Drop the hard-disable marker first, so the check below only sees
 * genuinely pending replay sources. */
119 get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
/* NOTE(review): the then-branch of this if (source line 121 — presumably
 * __hard_irq_enable()) is missing from this chunk. */
120 if (!(get_paca()->irq_happened & PACA_IRQ_EE))
/* Were interrupts (soft-)disabled in the context described by regs?
 * NOTE(review): the body (source lines 125-128) is missing from this chunk. */
124 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
/* Idle-entry preparation; defined out of line. */
129 extern bool prep_irq_for_idle(void);
131 #else /* CONFIG_PPC64 */
/* 32-bit: no lazy scheme — interrupt state lives directly in MSR[EE]. */
133 #define SET_MSR_EE(x) mtmsr(x)
/* Return the current MSR as the flags value.
 * NOTE(review): the body (source lines 136-139) is missing from this chunk. */
135 static inline unsigned long arch_local_save_flags(void)
/* Restore a previously-saved MSR-based flags value. */
140 static inline void arch_local_irq_restore(unsigned long flags)
142 #if defined(CONFIG_BOOKE)
/* BookE: write MSR[EE] from flags with the wrtee instruction. */
143 asm volatile("wrtee %0" : : "r" (flags) : "memory");
/* NOTE(review): the #else branch (presumably SET_MSR_EE(flags)) and the
 * closing #endif / brace are missing from this chunk. */
/* Save the current MSR-based flags and disable interrupts. */
149 static inline unsigned long arch_local_irq_save(void)
151 unsigned long flags = arch_local_save_flags();
/* BookE path: clear EE directly with wrteei 0. */
153 asm volatile("wrteei 0" : : : "memory");
/* Non-BookE path: rewrite MSR with EE masked off. */
155 SET_MSR_EE(flags & ~MSR_EE);
/* NOTE(review): the #ifdef CONFIG_BOOKE / #else / #endif lines and the
 * "return flags;" (source 152, 154, 156-158) are missing from this chunk. */
/* Disable interrupts, discarding the previous state. */
160 static inline void arch_local_irq_disable(void)
/* BookE path: wrteei 0 clears MSR[EE] directly. */
163 asm volatile("wrteei 0" : : : "memory");
/* Non-BookE path: reuse the save path and drop its return value.
 * NOTE(review): the surrounding #ifdef/#else/#endif lines are missing. */
165 arch_local_irq_save();
/* Enable interrupts by setting MSR[EE]. */
169 static inline void arch_local_irq_enable(void)
/* BookE path: wrteei 1 sets MSR[EE] directly. */
172 asm volatile("wrteei 1" : : : "memory")
/* Non-BookE path: read-modify-write MSR with EE set.
 * NOTE(review): the surrounding #ifdef/#else/#endif lines are missing;
 * also confirm whether line 172 originally ended with a semicolon. */
174 unsigned long msr = mfmsr();
175 SET_MSR_EE(msr | MSR_EE);
/* Disabled means MSR[EE] clear in the saved flags. */
179 static inline bool arch_irqs_disabled_flags(unsigned long flags)
181 return (flags & MSR_EE) == 0;
/* Are interrupts disabled right now? */
184 static inline bool arch_irqs_disabled(void)
186 return arch_irqs_disabled_flags(arch_local_save_flags());
/* 32-bit has no lazy/soft scheme, so hard-disable == plain disable. */
189 #define hard_irq_disable() arch_local_irq_disable()
/* Were interrupts disabled in the context described by regs? (MSR[EE] clear) */
191 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
193 return !(regs->msr & MSR_EE);
/* No-op on 32-bit: there is no lazy hard-disable state to undo. */
196 static inline void may_hard_irq_enable(void) { }
198 #endif /* CONFIG_PPC64 */
/* Default flags for newly-initialized irq descriptors on this arch. */
200 #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
203 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
204 * or should we not care like we do now ? --BenH.
208 #endif /* __ASSEMBLY__ */
209 #endif /* __KERNEL__ */
210 #endif /* _ASM_POWERPC_HW_IRQ_H */