/*
 * Linux/Meta general interrupt handling code
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/ratelimit.h>

#include <asm/core_reg.h>
#include <asm/mach/arch.h>
#include <asm/uaccess.h>

#ifdef CONFIG_4KSTACKS
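/*
 * Meta stacks grow upwards (A0StP is the stack pointer), and thread_info
 * sits at the base of each THREAD_SIZE-aligned stack.  The dedicated irq
 * stacks mirror the task stack layout so current_thread_info(), which is
 * derived by masking the stack pointer, still works while we run on them.
 */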
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

struct irq_domain *root_domain;

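/*
 * The Meta irq_chip only implements startup/shutdown: both delegate to
 * the TBI (Thread Binary Interface), which owns the underlying trigger
 * hardware and (de)registers the kernel's handler for the signal.
 */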
static unsigned int startup_meta_irq(struct irq_data *data)
{
        tbi_startup_interrupt(data->hwirq);
        return 0;
}

static void shutdown_meta_irq(struct irq_data *data)
{
        tbi_shutdown_interrupt(data->hwirq);
}

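/*
 * do_IRQ() is the architecture entry point for external interrupts.  It
 * is passed the hardware TBI signal number, which is translated to a
 * Linux virq via the root irq domain before the handler is invoked,
 * optionally on a separate per-cpu irq stack.
 */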
void do_IRQ(int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_4KSTACKS
        struct irq_desc *desc;
        union irq_ctx *curctx, *irqctx;
        u32 *isp;
#endif

        irq_enter();

        irq = irq_linear_revmap(root_domain, irq);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                unsigned long sp;

                /*
                 * Stacks grow upwards, so the low bits of A0StP give the
                 * current depth within the THREAD_SIZE-aligned stack.
                 */
                sp = __core_reg_get(A0StP);
                sp &= THREAD_SIZE - 1;

                if (unlikely(sp > (THREAD_SIZE - 1024)))
                        pr_err("Stack overflow in do_IRQ: %lu\n", sp);
        }
#endif

#ifdef CONFIG_4KSTACKS
        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (curctx != irqctx) {
                /* build the stack frame on the IRQ stack */
                isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
                irqctx->tinfo.task = curctx->tinfo.task;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

                desc = irq_to_desc(irq);

                asm volatile (
                        "MOV   D0.5,%0\n"     /* D0.5 = base of IRQ stack */
                        "MOV   D1Ar1,%1\n"    /* first handler arg: irq */
                        "MOV   D1RtP,%2\n"    /* D1RtP = desc->handle_irq */
                        "MOV   D0Ar2,%3\n"    /* second handler arg: desc */
                        "SWAP  A0StP,D0.5\n"  /* switch stacks, old A0StP saved in D0.5 */
                        "SWAP  PC,D1RtP\n"    /* effectively a call: PC = handler, D1RtP = return address */
                        "MOV   A0StP,D0.5\n"  /* back on the task stack */
                        :
                        : "r" (isp), "r" (irq), "r" (desc->handle_irq),
                          "r" (desc)
                        : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
                          "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
                          "D0.5"
                        );
        } else
#endif
                generic_handle_irq(irq);

        irq_exit();

        set_irq_regs(old_regs);
}

#ifdef CONFIG_4KSTACKS

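/*
 * Flat backing store for the per-cpu irq stacks: cpu N uses the
 * THREAD_SIZE bytes starting at offset N * THREAD_SIZE.
 */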
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        pr_info("CPU %u irqstacks, hard=%p soft=%p\n",
                cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

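/*
 * Like do_IRQ(), do_softirq() processes pending softirqs on a dedicated
 * per-cpu stack (softirq_ctx) rather than on the current task stack.
 */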
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));

                asm volatile (
                        "MOV   D0.5,%0\n"              /* D0.5 = base of softirq stack */
                        "SWAP  A0StP,D0.5\n"           /* switch stacks, old A0StP saved in D0.5 */
                        "CALLR D1RtP,___do_softirq\n"  /* call __do_softirq (asm names gain a '_' prefix) */
                        "MOV   A0StP,D0.5\n"           /* back on the task stack */
                        :
                        : "r" (isp)
                        : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
                          "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
                          "D0.5"
                        );
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#endif /* CONFIG_4KSTACKS */

static struct irq_chip meta_irq_type = {
        .name = "META-IRQ",
        .irq_startup = startup_meta_irq,
        .irq_shutdown = shutdown_meta_irq,
};

/**
 * tbisig_map() - Map a TBI signal number to a virtual IRQ number.
 * @hw:         Number of the TBI signal. Must be in range.
 *
 * Returns:     The virtual IRQ number of the IRQ specified by TBI signal
 *              number @hw.
 */
int tbisig_map(unsigned int hw)
{
        return irq_create_mapping(root_domain, hw);
}
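
/*
 * Typical use (hypothetical signal number and device shown): a driver
 * maps its TBI signal to a virq before requesting it:
 *
 *      int irq = tbisig_map(TBID_SIGNUM_SW1);
 *      int err = request_irq(irq, my_handler, 0, "mydev", mydev);
 */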

/**
 * metag_tbisig_map() - map a TBI signal to a Linux virtual IRQ number
 * @d:          root irq domain
 * @irq:        virtual irq number
 * @hw:         hardware irq number (TBI signal number)
 *
 * This sets up a virtual irq for a specified TBI signal number.
 */
static int metag_tbisig_map(struct irq_domain *d, unsigned int irq,
                            irq_hw_number_t hw)
{
#ifdef CONFIG_SMP
        /*
         * TBI signals are handled per hardware thread, so on SMP each CPU
         * services its own instance via the lockless per-cpu flow handler.
         */
        irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq);
#else
        irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq);
#endif
        return 0;
}

static const struct irq_domain_ops metag_tbisig_domain_ops = {
        .map = metag_tbisig_map,
};

/*
 * init_IRQ() is called during kernel startup to initialize the IRQ
 * handling routines: it creates the root irq domain for the 32 TBI
 * signals, sets up the boot CPU's irq stacks, and runs any machine
 * specific IRQ setup.
 */
void __init init_IRQ(void)
{
        root_domain = irq_domain_add_linear(NULL, 32,
                                            &metag_tbisig_domain_ops, NULL);
        if (unlikely(!root_domain))
                panic("init_IRQ: cannot add root IRQ domain");

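        /*
         * Set up the boot CPU's irq stacks; assumed to be a no-op when
         * CONFIG_4KSTACKS is disabled (stubbed out in <asm/irq.h>).
         */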
        irq_ctx_init(smp_processor_id());

        if (machine_desc->init_irq)
                machine_desc->init_irq();
}

int __init arch_probe_nr_irqs(void)
{
        if (machine_desc->nr_irqs)
                nr_irqs = machine_desc->nr_irqs;
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
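/*
 * route_irq() - retarget @irq at @cpu by invoking the chip's
 * irq_set_affinity() callback under the irq descriptor lock.
 */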
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = irq_data_get_irq_chip(data);

        raw_spin_lock_irq(&desc->lock);
        if (chip->irq_set_affinity)
                chip->irq_set_affinity(data, cpumask_of(cpu), false);
        raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
        unsigned int i, cpu = smp_processor_id();
        struct irq_desc *desc;

        for_each_irq_desc(i, desc) {
                struct irq_data *data = irq_desc_get_irq_data(desc);
                unsigned int newcpu;

                if (irqd_is_per_cpu(data))
                        continue;

                if (!cpumask_test_cpu(cpu, data->affinity))
                        continue;

                newcpu = cpumask_any_and(data->affinity, cpu_online_mask);

                if (newcpu >= nr_cpu_ids) {
                        pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
                                            i, cpu);

                        cpumask_setall(data->affinity);
                        newcpu = cpumask_any_and(data->affinity,
                                                 cpu_online_mask);
                }

                route_irq(data, i, newcpu);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */