/*
 * arch/ppc64/kernel/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

static unsigned int xics_startup(unsigned int irq);
static void xics_enable_irq(unsigned int irq);
static void xics_disable_irq(unsigned int irq);
static void xics_mask_and_ack_irq(unsigned int irq);
static void xics_end_irq(unsigned int irq);
static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);

static struct hw_interrupt_type xics_pic = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .enable = xics_enable_irq,
        .disable = xics_disable_irq,
        .ack = xics_mask_and_ack_irq,
        .end = xics_end_irq,
        .set_affinity = xics_set_affinity
};

static struct hw_interrupt_type xics_8259_pic = {
        .typename = " XICS/8259",
        .ack = xics_mask_and_ack_irq,
};

/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);

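/*
 * Interrupt source numbers with fixed meanings (per PAPR): source 2
 * is used for inter-processor interrupts, and a XIRR value of 0 means
 * no interrupt was pending (spurious).
 */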
#define XICS_IPI                2
#define XICS_IRQ_SPURIOUS       0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY        5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked SA_INTERRUPT
 */
#define IPI_PRIORITY            4

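/*
 * Layout of a per-cpu XICS presentation area as laid out in PAPR:
 * XIRR poll at offset 0x0, XIRR at 0x4 (a read accepts the pending
 * interrupt, a write signals EOI), and the MFRR/QIRR byte at 0xc.
 */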
struct xics_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;

typedef struct {
        int (*xirr_info_get)(int cpu);
        void (*xirr_info_set)(int cpu, int val);
        void (*cppr_info)(int cpu, u8 val);
        void (*qirr_info)(int cpu, u8 val);
} xics_ops;


/* SMP */

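/*
 * Direct MMIO access, used when the presentation areas are mapped
 * into the kernel (non-LPAR pSeries).  The CPPR is the top byte of
 * the XIRR, so a one-byte store to xirr.bytes[0] updates the
 * processor priority without issuing an EOI.
 */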
static int pSeries_xirr_info_get(int n_cpu)
{
        return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static void pSeries_xirr_info_set(int n_cpu, int value)
{
        out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static void pSeries_cppr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static void pSeries_qirr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}

static xics_ops pSeries_ops = {
        pSeries_xirr_info_get,
        pSeries_xirr_info_set,
        pSeries_cppr_info,
        pSeries_qirr_info
};

static xics_ops *ops = &pSeries_ops;


/* LPAR */

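/*
 * Under a hypervisor the presentation registers are not mapped into
 * the partition; access goes through the H_XIRR, H_EOI, H_CPPR and
 * H_IPI hypervisor calls instead.
 */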
static inline long plpar_eoi(unsigned long xirr)
{
        return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
        return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
        return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
        unsigned long dummy;
        return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}

static int pSeriesLP_xirr_info_get(int n_cpu)
{
        unsigned long lpar_rc;
        unsigned long return_value;

        lpar_rc = plpar_xirr(&return_value);
        if (lpar_rc != H_Success)
                panic("bad return code xirr - rc = %lx\n", lpar_rc);
        return (int)return_value;
}

static void pSeriesLP_xirr_info_set(int n_cpu, int value)
{
        unsigned long lpar_rc;
        unsigned long val64 = value & 0xffffffff;

        lpar_rc = plpar_eoi(val64);
        if (lpar_rc != H_Success)
                panic("bad return code EOI - rc = %lx, value=%lx\n", lpar_rc,
                      val64);
}

void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_cppr(value);
        if (lpar_rc != H_Success)
                panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static void pSeriesLP_qirr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
        if (lpar_rc != H_Success)
                panic("bad return code qirr - rc = %lx\n", lpar_rc);
}

xics_ops pSeriesLP_ops = {
        pSeriesLP_xirr_info_get,
        pSeriesLP_xirr_info_set,
        pSeriesLP_cppr_info,
        pSeriesLP_qirr_info
};

static unsigned int xics_startup(unsigned int virq)
{
        unsigned int irq;

        irq = irq_offset_down(virq);
        if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
                              &virt_irq_to_real_map[irq]) == -ENOMEM)
                printk(KERN_CRIT "Out of memory creating real -> virtual"
                       " IRQ mapping for irq %u (real 0x%x)\n",
                       virq, virt_irq_to_real(irq));
        xics_enable_irq(virq);
        return 0;       /* return value is ignored */
}

static unsigned int real_irq_to_virt(unsigned int real_irq)
{
        unsigned int *ptr;

        ptr = radix_tree_lookup(&irq_map, real_irq);
        if (ptr == NULL)
                return NO_IRQ;
        return ptr - virt_irq_to_real_map;
}

#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
        unsigned int server;
        /* For the moment only implement delivery to all cpus or one cpu */
        cpumask_t cpumask = irq_affinity[irq];
        cpumask_t tmp = CPU_MASK_NONE;

        if (!distribute_irqs)
                return default_server;

        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                server = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);

                if (cpus_empty(tmp))
                        server = default_distrib_server;
                else
                        server = get_hard_smp_processor_id(first_cpu(tmp));
        }

        return server;
}
#else
static int get_irq_server(unsigned int irq)
{
        return default_server;
}
#endif

static void xics_enable_irq(unsigned int virq)
{
        unsigned int irq;
        int call_status;
        unsigned int server;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI)
                return;

        server = get_irq_server(virq);
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
                       "returned %d\n", irq, call_status);
                printk("set_xive %x, server %x\n", ibm_set_xive, server);
                return;
        }

        /* Now unmask the interrupt (often a no-op) */
        call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
                       "returned %d\n", irq, call_status);
                return;
        }
}

static void xics_disable_real_irq(unsigned int irq)
{
        int call_status;
        unsigned int server;

        if (irq == XICS_IPI)
                return;

        call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
                       "ibm_int_off returned %d\n", irq, call_status);
                return;
        }

        server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
                       " returned %d\n", irq, call_status);
                return;
        }
}

static void xics_disable_irq(unsigned int virq)
{
        unsigned int irq;

        irq = virt_irq_to_real(irq_offset_down(virq));
        xics_disable_real_irq(irq);
}

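/*
 * EOI is a store of (cppr << 24 | source) to the XIRR: the 0xff in
 * the top byte restores the processor priority to "accept anything"
 * while the low 24 bits name the source being ended.
 */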
static void xics_end_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        iosync();
        ops->xirr_info_set(cpu, ((0xff << 24) |
                                 (virt_irq_to_real(irq_offset_down(irq)))));
}

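/*
 * ISA interrupts arrive cascaded through an i8259 behind a single
 * XICS source: ack the 8259 first, then EOI the cascade source so
 * the XICS can present further interrupts.
 */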
static void xics_mask_and_ack_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        if (irq < irq_offset_value()) {
                i8259_pic.ack(irq);
                iosync();
                ops->xirr_info_set(cpu, ((0xff << 24) |
                                         xics_irq_8259_cascade_real));
                iosync();
        }
}

int xics_get_irq(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        unsigned int vec;
        int irq;

        vec = ops->xirr_info_get(cpu);
        /*  (vec >> 24) == old priority */
        vec &= 0x00ffffff;

        /* for sanity, this had better be < NR_IRQS - 16 */
        if (vec == xics_irq_8259_cascade_real) {
                irq = i8259_irq(regs);
                if (irq == -1) {
                        /* Spurious cascaded interrupt.  Still must ack xics */
                        xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
                        irq = -1;
                }
        } else if (vec == XICS_IRQ_SPURIOUS) {
                irq = -1;
        } else {
                irq = real_irq_to_virt(vec);
                if (irq == NO_IRQ)
                        irq = real_irq_to_virt_slowpath(vec);
                if (irq == NO_IRQ) {
                        printk(KERN_ERR "Interrupt %u (real) is invalid,"
                               " disabling it.\n", vec);
                        xics_disable_real_irq(vec);
                } else
                        irq = irq_offset_up(irq);
        }
        return irq;
}

#ifdef CONFIG_SMP

irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        ops->qirr_info(cpu, 0xff);

        WARN_ON(cpu_is_offline(cpu));

        while (xics_ipi_message[cpu].value) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_RESCHEDULE, regs);
                }
#if 0
                if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
                }
#endif
#ifdef CONFIG_DEBUGGER
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
                }
#endif
        }
        return IRQ_HANDLED;
}

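/*
 * An IPI is raised by storing a priority into the target cpu's MFRR
 * (qirr); the handler above stores 0xff to clear it again.
 */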
void xics_cause_IPI(int cpu)
{
        ops->qirr_info(cpu, IPI_PRIORITY);
}
#endif /* CONFIG_SMP */

void xics_setup_cpu(void)
{
        int cpu = smp_processor_id();

        ops->cppr_info(cpu, 0xff);
        iosync();

        /*
         * Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         *
         * XXX: undo of teardown on kexec needs this too, as may hotplug
         */
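        /*
         * Note: the indicator for a server's GIQ link is addressed as
         * the all-ones server mask minus the server number; the same
         * expression is used for join and leave throughout this file.
         */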
        rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}

void xics_init_IRQ(void)
{
        int i;
        unsigned long intr_size = 0;
        struct device_node *np;
        uint *ireg, ilen, indx = 0;
        unsigned long intr_base = 0;
        struct xics_interrupt_node {
                unsigned long addr;
                unsigned long size;
        } intnodes[NR_CPUS];

        ppc64_boot_msg(0x20, "XICS Init");

        ibm_get_xive = rtas_token("ibm,get-xive");
        ibm_set_xive = rtas_token("ibm,set-xive");
        ibm_int_on  = rtas_token("ibm,int-on");
        ibm_int_off = rtas_token("ibm,int-off");

        np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
        if (!np)
                panic("xics_init_IRQ: can't find interrupt presentation");

nextnode:
        ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
        if (ireg) {
                /*
                 * set node starting index for this node
                 */
                indx = *ireg;
        }

        ireg = (uint *)get_property(np, "reg", &ilen);
        if (!ireg)
                panic("xics_init_IRQ: can't find interrupt reg property");

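        /* Each "reg" entry is a 64-bit address/size pair split across
         * four 32-bit cells; reassemble them per presentation area. */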
        while (ilen) {
                intnodes[indx].addr = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].addr |= *ireg++;
                ilen -= sizeof(uint);
                intnodes[indx].size = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].size |= *ireg++;
                ilen -= sizeof(uint);
                indx++;
                if (indx >= NR_CPUS)
                        break;
        }

        np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
        if ((indx < NR_CPUS) && np)
                goto nextnode;

        /* Find the server numbers for the boot cpu. */
        for (np = of_find_node_by_type(NULL, "cpu");
             np;
             np = of_find_node_by_type(np, "cpu")) {
                ireg = (uint *)get_property(np, "reg", &ilen);
                if (ireg && ireg[0] == boot_cpuid_phys) {
                        ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
                                                    &ilen);
                        i = ilen / sizeof(int);
                        if (ireg && i > 0) {
                                default_server = ireg[0];
                                default_distrib_server = ireg[i-1]; /* take last element */
                        }
                        ireg = (uint *)get_property(np,
                                        "ibm,interrupt-server#-size", NULL);
                        if (ireg)
                                interrupt_server_size = *ireg;
                        break;
                }
        }
        of_node_put(np);

        intr_base = intnodes[0].addr;
        intr_size = intnodes[0].size;

        np = of_find_node_by_type(NULL, "interrupt-controller");
        if (!np) {
                printk(KERN_WARNING "xics: no ISA interrupt controller\n");
                xics_irq_8259_cascade_real = -1;
                xics_irq_8259_cascade = -1;
        } else {
                ireg = (uint *) get_property(np, "interrupts", NULL);
                if (!ireg)
                        panic("xics_init_IRQ: can't find ISA interrupts property");

                xics_irq_8259_cascade_real = *ireg;
                xics_irq_8259_cascade
                        = virt_irq_create_mapping(xics_irq_8259_cascade_real);
                of_node_put(np);
        }

        if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
                for_each_cpu(i) {
                        int hard_id;

                        /* FIXME: Do this dynamically! --RR */
                        if (!cpu_present(i))
                                continue;

                        hard_id = get_hard_smp_processor_id(i);
                        xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
                                                  intnodes[hard_id].size);
                }
#else
                xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
        } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
                ops = &pSeriesLP_ops;
        }

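        /*
         * The first 16 virtual irqs are the cascaded ISA interrupts:
         * give them the 8259 flavour of the handler (enable/disable
         * borrowed from i8259_pic); everything else is native XICS.
         */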
        xics_8259_pic.enable = i8259_pic.enable;
        xics_8259_pic.disable = i8259_pic.disable;
        for (i = 0; i < 16; ++i)
                get_irq_desc(i)->handler = &xics_8259_pic;
        for (; i < NR_IRQS; ++i)
                get_irq_desc(i)->handler = &xics_pic;

        xics_setup_cpu();

        ppc64_boot_msg(0x21, "XICS Done");
}

/*
 * We can't do this in init_IRQ because we need the memory subsystem up for
 * request_irq()
 */
static int __init xics_setup_i8259(void)
{
        if (ppc64_interrupt_controller == IC_PPC_XIC &&
            xics_irq_8259_cascade != -1) {
                if (request_irq(irq_offset_up(xics_irq_8259_cascade),
                                no_action, 0, "8259 cascade", NULL))
                        printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
                                        "cascade\n");
                i8259_init(0, 0);
        }
        return 0;
}
arch_initcall(xics_setup_i8259);

#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
        virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

        /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
        request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
                    "IPI", NULL);
        get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif

static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
        unsigned int irq;
        int status;
        int xics_status[2];
        unsigned long newmask;
        cpumask_t tmp = CPU_MASK_NONE;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI || irq == NO_IRQ)
                return;

        status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
                       "returns %d\n", irq, status);
                return;
        }

        /* For the moment only implement delivery to all cpus or one cpu */
        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                newmask = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);
                if (cpus_empty(tmp))
                        return;
                newmask = get_hard_smp_processor_id(first_cpu(tmp));
        }

        status = rtas_call(ibm_set_xive, 3, 1, NULL,
                                irq, newmask, xics_status[1]);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
                       "returns %d\n", irq, status);
                return;
        }
}

void xics_teardown_cpu(int secondary)
{
        int cpu = smp_processor_id();

        ops->cppr_info(cpu, 0x00);
        iosync();

        /*
         * Some machines need to have at least one cpu in the GIQ,
         * so leave the master cpu in the group.
         */
        if (secondary) {
                /*
                 * we need to EOI the IPI if we got here from kexec down IPI
                 *
                 * probably need to check all the other interrupts too
                 * should we be flagging idle loop instead?
                 * or creating some task to be scheduled?
                 */
                ops->xirr_info_set(cpu, XICS_IPI);
                rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                        (1UL << interrupt_server_size) - 1 -
                        default_distrib_server, 0);
        }
}

#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
        int status;
        unsigned int irq, virq, cpu = smp_processor_id();

        /* Reject any interrupt that was queued to us... */
        ops->cppr_info(cpu, 0);
        iosync();

        /* remove ourselves from the global interrupt queue */
        status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
        WARN_ON(status < 0);

        /* Allow IPIs again... */
        ops->cppr_info(cpu, DEFAULT_PRIORITY);
        iosync();

        for_each_irq(virq) {
                irq_desc_t *desc;
                int xics_status[2];
                unsigned long flags;

                /* We can't set affinity on ISA interrupts */
                if (virq < irq_offset_value())
                        continue;

                desc = get_irq_desc(virq);
                irq = virt_irq_to_real(irq_offset_down(virq));

                /* We need to get IPIs still. */
                if (irq == XICS_IPI || irq == NO_IRQ)
                        continue;

                /* We only need to migrate enabled IRQS */
                if (desc == NULL || desc->handler == NULL
                    || desc->action == NULL
                    || desc->handler->set_affinity == NULL)
                        continue;

                spin_lock_irqsave(&desc->lock, flags);

                status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
                if (status) {
                        printk(KERN_ERR "migrate_irqs_away: irq=%u "
                                        "ibm,get-xive returns %d\n",
                                        virq, status);
                        goto unlock;
                }

                /*
                 * We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (xics_status[0] != get_hard_smp_processor_id(cpu))
                        goto unlock;

                printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
                       virq, cpu);

                /* Reset affinity to all cpus */
                desc->handler->set_affinity(virq, CPU_MASK_ALL);
                irq_affinity[virq] = CPU_MASK_ALL;
unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif