drivers/xen/events.c
1 /*
2  * Xen event channels
3  *
4  * Xen models interrupts with abstract event channels.  Because each
5  * domain gets 1024 event channels, but NR_IRQS is not that large, we
6  * must dynamically map irqs<->event channels.  The event channels
7  * interface with the rest of the kernel by defining a xen interrupt
8  * chip.  When an event is received, it is mapped to an irq and sent
9  * through the normal interrupt processing path.
10  *
11  * There are four kinds of events which can be mapped to an event
12  * channel:
13  *
14  * 1. Inter-domain notifications.  This includes all the virtual
15  *    device events, since they're driven by front-ends in another domain
16  *    (typically dom0).
17  * 2. VIRQs, typically used for timers.  These are per-cpu events.
18  * 3. IPIs.
19  * 4. Hardware interrupts. Not supported at present.
20  *
21  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22  */
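/*
 * Illustrative mapping of the event kinds above onto the binding helpers
 * defined later in this file (an editorial sketch, not part of the original
 * source):
 *
 *	inter-domain:	bind_evtchn_to_irqhandler(evtchn, handler, ...)
 *	VIRQ:		bind_virq_to_irqhandler(VIRQ_TIMER, cpu, handler, ...)
 *	IPI:		bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, ...)
 *
 * Each helper returns a Linux irq which is later torn down with
 * unbind_from_irqhandler(irq, dev_id).
 */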
23
24 #include <linux/linkage.h>
25 #include <linux/interrupt.h>
26 #include <linux/irq.h>
27 #include <linux/module.h>
28 #include <linux/string.h>
29 #include <linux/bootmem.h>
30
31 #include <asm/ptrace.h>
32 #include <asm/irq.h>
33 #include <asm/idle.h>
34 #include <asm/sync_bitops.h>
35 #include <asm/xen/hypercall.h>
36 #include <asm/xen/hypervisor.h>
37
38 #include <xen/xen-ops.h>
39 #include <xen/events.h>
40 #include <xen/interface/xen.h>
41 #include <xen/interface/event_channel.h>
42
43 /*
44  * This lock protects updates to the following mapping and reference-count
45  * arrays. The lock does not need to be acquired to read the mapping tables.
46  */
47 static DEFINE_SPINLOCK(irq_mapping_update_lock);
48
49 /* IRQ <-> VIRQ mapping. */
50 static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
51
52 /* IRQ <-> IPI mapping */
53 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
54
55 /* Packed IRQ information: binding type, sub-type index, and event channel. */
56 struct packed_irq
57 {
58         unsigned short evtchn;
59         unsigned char index;
60         unsigned char type;
61 };
62
63 static struct packed_irq irq_info[NR_IRQS];
64
65 /* Binding types. */
66 enum {
67         IRQT_UNBOUND,
68         IRQT_PIRQ,
69         IRQT_VIRQ,
70         IRQT_IPI,
71         IRQT_EVTCHN
72 };
73
74 /* Convenient shorthand for packed representation of an unbound IRQ. */
75 #define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)
76
77 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
78         [0 ... NR_EVENT_CHANNELS-1] = -1
79 };
80 struct cpu_evtchn_s {
81         unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
82 };
83 static struct cpu_evtchn_s *cpu_evtchn_mask_p;
84 static inline unsigned long *cpu_evtchn_mask(int cpu)
85 {
86         return cpu_evtchn_mask_p[cpu].bits;
87 }
88 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
89
90 /* Reference counts for bindings to IRQs. */
91 static int irq_bindcount[NR_IRQS];
92
93 /* Xen will never allocate port zero for any purpose. */
94 #define VALID_EVTCHN(chn)       ((chn) != 0)
95
96 static struct irq_chip xen_dynamic_chip;
97
98 /* Constructor for packed IRQ information. */
99 static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
100 {
101         return (struct packed_irq) { evtchn, index, type };
102 }
103
104 /*
105  * Accessors for packed IRQ information.
106  */
107 static inline unsigned int evtchn_from_irq(int irq)
108 {
109         return irq_info[irq].evtchn;
110 }
111
112 static inline unsigned int index_from_irq(int irq)
113 {
114         return irq_info[irq].index;
115 }
116
117 static inline unsigned int type_from_irq(int irq)
118 {
119         return irq_info[irq].type;
120 }
121
122 static inline unsigned long active_evtchns(unsigned int cpu,
123                                            struct shared_info *sh,
124                                            unsigned int idx)
125 {
126         return (sh->evtchn_pending[idx] &
127                 cpu_evtchn_mask(cpu)[idx] &
128                 ~sh->evtchn_mask[idx]);
129 }
130
131 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
132 {
133         int irq = evtchn_to_irq[chn];
134
135         BUG_ON(irq == -1);
136 #ifdef CONFIG_SMP
137         cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
138 #endif
139
140         __clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
141         __set_bit(chn, cpu_evtchn_mask(cpu));
142
143         cpu_evtchn[chn] = cpu;
144 }
145
146 static void init_evtchn_cpu_bindings(void)
147 {
148 #ifdef CONFIG_SMP
149         struct irq_desc *desc;
150         int i;
151
152         /* By default all event channels notify CPU#0. */
153         for_each_irq_desc(i, desc) {
154                 cpumask_copy(desc->affinity, cpumask_of(0));
155         }
156 #endif
157
158         memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
159         memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
160 }
161
162 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
163 {
164         return cpu_evtchn[evtchn];
165 }
166
167 static inline void clear_evtchn(int port)
168 {
169         struct shared_info *s = HYPERVISOR_shared_info;
170         sync_clear_bit(port, &s->evtchn_pending[0]);
171 }
172
173 static inline void set_evtchn(int port)
174 {
175         struct shared_info *s = HYPERVISOR_shared_info;
176         sync_set_bit(port, &s->evtchn_pending[0]);
177 }
178
179 static inline int test_evtchn(int port)
180 {
181         struct shared_info *s = HYPERVISOR_shared_info;
182         return sync_test_bit(port, &s->evtchn_pending[0]);
183 }
184
185
186 /**
187  * notify_remote_via_irq - send event to remote end of event channel via irq
188  * @irq: irq of event channel to send event to
189  *
190  * Unlike notify_remote_via_evtchn(), this is safe to use across
191  * save/restore. Notifications on a broken connection are silently
192  * dropped.
193  */
194 void notify_remote_via_irq(int irq)
195 {
196         int evtchn = evtchn_from_irq(irq);
197
198         if (VALID_EVTCHN(evtchn))
199                 notify_remote_via_evtchn(evtchn);
200 }
201 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
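/*
 * Usage sketch (editorial, not part of the original source): a driver that
 * has just pushed requests onto a shared ring kicks the other end by irq
 * rather than by raw event channel, so the notification stays valid across
 * save/restore.  "ring", "notify" and "irq" are the driver's own state; the
 * ring macro comes from <xen/interface/io/ring.h>:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */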
202
203 static void mask_evtchn(int port)
204 {
205         struct shared_info *s = HYPERVISOR_shared_info;
206         sync_set_bit(port, &s->evtchn_mask[0]);
207 }
208
209 static void unmask_evtchn(int port)
210 {
211         struct shared_info *s = HYPERVISOR_shared_info;
212         unsigned int cpu = get_cpu();
213
214         BUG_ON(!irqs_disabled());
215
216         /* Slow path (hypercall) if this is a non-local port. */
217         if (unlikely(cpu != cpu_from_evtchn(port))) {
218                 struct evtchn_unmask unmask = { .port = port };
219                 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
220         } else {
221                 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
222
223                 sync_clear_bit(port, &s->evtchn_mask[0]);
224
225                 /*
226                  * The following is basically the equivalent of
227                  * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
228                  * the interrupt edge' if the channel is masked.
229                  */
230                 if (sync_test_bit(port, &s->evtchn_pending[0]) &&
231                     !sync_test_and_set_bit(port / BITS_PER_LONG,
232                                            &vcpu_info->evtchn_pending_sel))
233                         vcpu_info->evtchn_upcall_pending = 1;
234         }
235
236         put_cpu();
237 }
238
239 static int find_unbound_irq(void)
240 {
241         int irq;
242         struct irq_desc *desc;
243
244         /* Only allocate from dynirq range */
245         for (irq = 0; irq < nr_irqs; irq++)
246                 if (irq_bindcount[irq] == 0)
247                         break;
248
249         if (irq == nr_irqs)
250                 panic("No available IRQ to bind to: increase nr_irqs!\n");
251
252         desc = irq_to_desc_alloc_cpu(irq, 0);
253         if (WARN_ON(desc == NULL))
254                 return -1;
255
256         return irq;
257 }
258
259 int bind_evtchn_to_irq(unsigned int evtchn)
260 {
261         int irq;
262
263         spin_lock(&irq_mapping_update_lock);
264
265         irq = evtchn_to_irq[evtchn];
266
267         if (irq == -1) {
268                 irq = find_unbound_irq();
269
270                 dynamic_irq_init(irq);
271                 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
272                                               handle_level_irq, "event");
273
274                 evtchn_to_irq[evtchn] = irq;
275                 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
276         }
277
278         irq_bindcount[irq]++;
279
280         spin_unlock(&irq_mapping_update_lock);
281
282         return irq;
283 }
284 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
285
286 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
287 {
288         struct evtchn_bind_ipi bind_ipi;
289         int evtchn, irq;
290
291         spin_lock(&irq_mapping_update_lock);
292
293         irq = per_cpu(ipi_to_irq, cpu)[ipi];
294         if (irq == -1) {
295                 irq = find_unbound_irq();
296                 if (irq < 0)
297                         goto out;
298
299                 dynamic_irq_init(irq);
300                 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
301                                               handle_level_irq, "ipi");
302
303                 bind_ipi.vcpu = cpu;
304                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
305                                                 &bind_ipi) != 0)
306                         BUG();
307                 evtchn = bind_ipi.port;
308
309                 evtchn_to_irq[evtchn] = irq;
310                 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
311
312                 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
313
314                 bind_evtchn_to_cpu(evtchn, cpu);
315         }
316
317         irq_bindcount[irq]++;
318
319  out:
320         spin_unlock(&irq_mapping_update_lock);
321         return irq;
322 }
323
324
325 static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
326 {
327         struct evtchn_bind_virq bind_virq;
328         int evtchn, irq;
329
330         spin_lock(&irq_mapping_update_lock);
331
332         irq = per_cpu(virq_to_irq, cpu)[virq];
333
334         if (irq == -1) {
335                 bind_virq.virq = virq;
336                 bind_virq.vcpu = cpu;
337                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
338                                                 &bind_virq) != 0)
339                         BUG();
340                 evtchn = bind_virq.port;
341
342                 irq = find_unbound_irq();
343
344                 dynamic_irq_init(irq);
345                 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
346                                               handle_level_irq, "virq");
347
348                 evtchn_to_irq[evtchn] = irq;
349                 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
350
351                 per_cpu(virq_to_irq, cpu)[virq] = irq;
352
353                 bind_evtchn_to_cpu(evtchn, cpu);
354         }
355
356         irq_bindcount[irq]++;
357
358         spin_unlock(&irq_mapping_update_lock);
359
360         return irq;
361 }
362
363 static void unbind_from_irq(unsigned int irq)
364 {
365         struct evtchn_close close;
366         int evtchn = evtchn_from_irq(irq);
367
368         spin_lock(&irq_mapping_update_lock);
369
370         if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
371                 close.port = evtchn;
372                 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
373                         BUG();
374
375                 switch (type_from_irq(irq)) {
376                 case IRQT_VIRQ:
377                         per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
378                                 [index_from_irq(irq)] = -1;
379                         break;
380                 case IRQT_IPI:
381                         per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
382                                 [index_from_irq(irq)] = -1;
383                         break;
384                 default:
385                         break;
386                 }
387
388                 /* Closed ports are implicitly re-bound to VCPU0. */
389                 bind_evtchn_to_cpu(evtchn, 0);
390
391                 evtchn_to_irq[evtchn] = -1;
392                 irq_info[irq] = IRQ_UNBOUND;
393
394                 dynamic_irq_cleanup(irq);
395         }
396
397         spin_unlock(&irq_mapping_update_lock);
398 }
399
400 int bind_evtchn_to_irqhandler(unsigned int evtchn,
401                               irq_handler_t handler,
402                               unsigned long irqflags,
403                               const char *devname, void *dev_id)
404 {
405         unsigned int irq;
406         int retval;
407
408         irq = bind_evtchn_to_irq(evtchn);
409         retval = request_irq(irq, handler, irqflags, devname, dev_id);
410         if (retval != 0) {
411                 unbind_from_irq(irq);
412                 return retval;
413         }
414
415         return irq;
416 }
417 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
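/*
 * Illustrative caller (editorial sketch; "my_handler" and "dev" are
 * hypothetical): the return value is the bound irq on success or the
 * negative errno from request_irq() on failure, so callers check it before
 * stashing the irq for a later unbind_from_irqhandler():
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "mydev", dev);
 *	if (err < 0)
 *		goto fail;
 *	dev->irq = err;
 */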
418
419 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
420                             irq_handler_t handler,
421                             unsigned long irqflags, const char *devname, void *dev_id)
422 {
423         unsigned int irq;
424         int retval;
425
426         irq = bind_virq_to_irq(virq, cpu);
427         retval = request_irq(irq, handler, irqflags, devname, dev_id);
428         if (retval != 0) {
429                 unbind_from_irq(irq);
430                 return retval;
431         }
432
433         return irq;
434 }
435 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
436
437 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
438                            unsigned int cpu,
439                            irq_handler_t handler,
440                            unsigned long irqflags,
441                            const char *devname,
442                            void *dev_id)
443 {
444         int irq, retval;
445
446         irq = bind_ipi_to_irq(ipi, cpu);
447         if (irq < 0)
448                 return irq;
449
450         retval = request_irq(irq, handler, irqflags, devname, dev_id);
451         if (retval != 0) {
452                 unbind_from_irq(irq);
453                 return retval;
454         }
455
456         return irq;
457 }
458
459 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
460 {
461         free_irq(irq, dev_id);
462         unbind_from_irq(irq);
463 }
464 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
465
466 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
467 {
468         int irq = per_cpu(ipi_to_irq, cpu)[vector];
469         BUG_ON(irq < 0);
470         notify_remote_via_irq(irq);
471 }
472
473 irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
474 {
475         struct shared_info *sh = HYPERVISOR_shared_info;
476         int cpu = smp_processor_id();
477         int i;
478         unsigned long flags;
479         static DEFINE_SPINLOCK(debug_lock);
480
481         spin_lock_irqsave(&debug_lock, flags);
482
483         printk("vcpu %d\n  ", cpu);
484
485         for_each_online_cpu(i) {
486                 struct vcpu_info *v = per_cpu(xen_vcpu, i);
487                 printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
488                         (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
489                         v->evtchn_upcall_pending,
490                         v->evtchn_pending_sel);
491         }
492         printk("pending:\n   ");
493         for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
494                 printk("%08lx%s", sh->evtchn_pending[i],
495                         i % 8 == 0 ? "\n   " : " ");
496         printk("\nmasks:\n   ");
497         for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
498                 printk("%08lx%s", sh->evtchn_mask[i],
499                         i % 8 == 0 ? "\n   " : " ");
500
501         printk("\nunmasked:\n   ");
502         for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
503                 printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
504                         i % 8 == 0 ? "\n   " : " ");
505
506         printk("\npending list:\n");
507         for(i = 0; i < NR_EVENT_CHANNELS; i++) {
508                 if (sync_test_bit(i, sh->evtchn_pending)) {
509                         printk("  %d: event %d -> irq %d\n",
510                                 cpu_evtchn[i], i,
511                                 evtchn_to_irq[i]);
512                 }
513         }
514
515         spin_unlock_irqrestore(&debug_lock, flags);
516
517         return IRQ_HANDLED;
518 }
519
520
521 static void xen_do_irq(unsigned irq, struct pt_regs *regs)
522 {
523         struct pt_regs *old_regs = set_irq_regs(regs);
524
525         if (WARN_ON(irq == -1))
526                 return;
527
528         exit_idle();
529         irq_enter();
530
531         //printk("cpu %d handling irq %d\n", smp_processor_id(), info->irq);
532         handle_irq(irq, regs);
533
534         irq_exit();
535
536         set_irq_regs(old_regs);
537 }
538
539 /*
540  * Search the CPUs pending events bitmasks.  For each one found, map
541  * the event number to an irq, and feed it into do_IRQ() for
542  * handling.
543  *
544  * Xen uses a two-level bitmap to speed searching.  The first level is
545  * a bitset of words which contain pending event bits.  The second
546  * level is a bitset of pending events themselves.
547  */
548 void xen_evtchn_do_upcall(struct pt_regs *regs)
549 {
550         int cpu = get_cpu();
551         struct shared_info *s = HYPERVISOR_shared_info;
552         struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
553         static DEFINE_PER_CPU(unsigned, nesting_count);
554         unsigned count;
555
556         do {
557                 unsigned long pending_words;
558
559                 vcpu_info->evtchn_upcall_pending = 0;
560
561                 if (__get_cpu_var(nesting_count)++)
562                         goto out;
563
564 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
565                 /* Clear master flag /before/ clearing selector flag. */
566                 wmb();
567 #endif
568                 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
569                 while (pending_words != 0) {
570                         unsigned long pending_bits;
571                         int word_idx = __ffs(pending_words);
572                         pending_words &= ~(1UL << word_idx);
573
574                         while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
575                                 int bit_idx = __ffs(pending_bits);
576                                 int port = (word_idx * BITS_PER_LONG) + bit_idx;
577                                 int irq = evtchn_to_irq[port];
578
579                                 xen_do_irq(irq, regs);
580                         }
581                 }
582
583                 BUG_ON(!irqs_disabled());
584
585                 count = __get_cpu_var(nesting_count);
586                 __get_cpu_var(nesting_count) = 0;
587         } while(count != 1);
588
589 out:
590         put_cpu();
591 }
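/*
 * Worked example of the two-level decode above (editorial note): with 64-bit
 * longs, pending_words having bit 2 set and active_evtchns() reporting bit 5
 * in that word yields port = 2 * BITS_PER_LONG + 5 = 133, which is looked up
 * in evtchn_to_irq[133] and dispatched through xen_do_irq().
 */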
592
593 /* Rebind a new event channel to an existing irq. */
594 void rebind_evtchn_irq(int evtchn, int irq)
595 {
596         /* Make sure the irq is masked, since the new event channel
597            will also be masked. */
598         disable_irq(irq);
599
600         spin_lock(&irq_mapping_update_lock);
601
602         /* After resume the irq<->evtchn mappings are all cleared out */
603         BUG_ON(evtchn_to_irq[evtchn] != -1);
604         /* Expect irq to have been bound before,
605            so the bindcount should be non-0 */
606         BUG_ON(irq_bindcount[irq] == 0);
607
608         evtchn_to_irq[evtchn] = irq;
609         irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
610
611         spin_unlock(&irq_mapping_update_lock);
612
613         /* new event channels are always bound to cpu 0 */
614         irq_set_affinity(irq, cpumask_of(0));
615
616         /* Unmask the event channel. */
617         enable_irq(irq);
618 }
619
620 /* Rebind an evtchn so that it gets delivered to a specific cpu */
621 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
622 {
623         struct evtchn_bind_vcpu bind_vcpu;
624         int evtchn = evtchn_from_irq(irq);
625
626         if (!VALID_EVTCHN(evtchn))
627                 return;
628
629         /* Send future instances of this interrupt to other vcpu. */
630         bind_vcpu.port = evtchn;
631         bind_vcpu.vcpu = tcpu;
632
633         /*
634          * If this fails, it usually just indicates that we're dealing with a
635          * virq or IPI channel, which don't actually need to be rebound. Ignore
636          * it, but don't do the xenlinux-level rebind in that case.
637          */
638         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
639                 bind_evtchn_to_cpu(evtchn, tcpu);
640 }
641
642
643 static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
644 {
645         unsigned tcpu = cpumask_first(dest);
646         rebind_irq_to_cpu(irq, tcpu);
647 }
648
649 int resend_irq_on_evtchn(unsigned int irq)
650 {
651         int masked, evtchn = evtchn_from_irq(irq);
652         struct shared_info *s = HYPERVISOR_shared_info;
653
654         if (!VALID_EVTCHN(evtchn))
655                 return 1;
656
657         masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
658         sync_set_bit(evtchn, s->evtchn_pending);
659         if (!masked)
660                 unmask_evtchn(evtchn);
661
662         return 1;
663 }
664
665 static void enable_dynirq(unsigned int irq)
666 {
667         int evtchn = evtchn_from_irq(irq);
668
669         if (VALID_EVTCHN(evtchn))
670                 unmask_evtchn(evtchn);
671 }
672
673 static void disable_dynirq(unsigned int irq)
674 {
675         int evtchn = evtchn_from_irq(irq);
676
677         if (VALID_EVTCHN(evtchn))
678                 mask_evtchn(evtchn);
679 }
680
681 static void ack_dynirq(unsigned int irq)
682 {
683         int evtchn = evtchn_from_irq(irq);
684
685         move_native_irq(irq);
686
687         if (VALID_EVTCHN(evtchn))
688                 clear_evtchn(evtchn);
689 }
690
691 static int retrigger_dynirq(unsigned int irq)
692 {
693         int evtchn = evtchn_from_irq(irq);
694         struct shared_info *sh = HYPERVISOR_shared_info;
695         int ret = 0;
696
697         if (VALID_EVTCHN(evtchn)) {
698                 int masked;
699
700                 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
701                 sync_set_bit(evtchn, sh->evtchn_pending);
702                 if (!masked)
703                         unmask_evtchn(evtchn);
704                 ret = 1;
705         }
706
707         return ret;
708 }
709
710 static void restore_cpu_virqs(unsigned int cpu)
711 {
712         struct evtchn_bind_virq bind_virq;
713         int virq, irq, evtchn;
714
715         for (virq = 0; virq < NR_VIRQS; virq++) {
716                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
717                         continue;
718
719                 BUG_ON(irq_info[irq].type != IRQT_VIRQ);
720                 BUG_ON(irq_info[irq].index != virq);
721
722                 /* Get a new binding from Xen. */
723                 bind_virq.virq = virq;
724                 bind_virq.vcpu = cpu;
725                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
726                                                 &bind_virq) != 0)
727                         BUG();
728                 evtchn = bind_virq.port;
729
730                 /* Record the new mapping. */
731                 evtchn_to_irq[evtchn] = irq;
732                 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
733                 bind_evtchn_to_cpu(evtchn, cpu);
734
735                 /* Ready for use. */
736                 unmask_evtchn(evtchn);
737         }
738 }
739
740 static void restore_cpu_ipis(unsigned int cpu)
741 {
742         struct evtchn_bind_ipi bind_ipi;
743         int ipi, irq, evtchn;
744
745         for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
746                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
747                         continue;
748
749                 BUG_ON(irq_info[irq].type != IRQT_IPI);
750                 BUG_ON(irq_info[irq].index != ipi);
751
752                 /* Get a new binding from Xen. */
753                 bind_ipi.vcpu = cpu;
754                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
755                                                 &bind_ipi) != 0)
756                         BUG();
757                 evtchn = bind_ipi.port;
758
759                 /* Record the new mapping. */
760                 evtchn_to_irq[evtchn] = irq;
761                 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
762                 bind_evtchn_to_cpu(evtchn, cpu);
763
764                 /* Ready for use. */
765                 unmask_evtchn(evtchn);
766
767         }
768 }
769
770 /* Clear an irq's pending state, in preparation for polling on it */
771 void xen_clear_irq_pending(int irq)
772 {
773         int evtchn = evtchn_from_irq(irq);
774
775         if (VALID_EVTCHN(evtchn))
776                 clear_evtchn(evtchn);
777 }
778
779 void xen_set_irq_pending(int irq)
780 {
781         int evtchn = evtchn_from_irq(irq);
782
783         if (VALID_EVTCHN(evtchn))
784                 set_evtchn(evtchn);
785 }
786
787 bool xen_test_irq_pending(int irq)
788 {
789         int evtchn = evtchn_from_irq(irq);
790         bool ret = false;
791
792         if (VALID_EVTCHN(evtchn))
793                 ret = test_evtchn(evtchn);
794
795         return ret;
796 }
797
798 /* Poll waiting for an irq to become pending.  In the usual case, the
799    irq will be disabled so it won't deliver an interrupt. */
800 void xen_poll_irq(int irq)
801 {
802         evtchn_port_t evtchn = evtchn_from_irq(irq);
803
804         if (VALID_EVTCHN(evtchn)) {
805                 struct sched_poll poll;
806
807                 poll.nr_ports = 1;
808                 poll.timeout = 0;
809                 set_xen_guest_handle(poll.ports, &evtchn);
810
811                 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
812                         BUG();
813         }
814 }
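/*
 * Illustrative polling loop (editorial sketch; "lock_is_free" and "lock" are
 * hypothetical): a contended slow path clears the irq's pending state, then
 * blocks in the hypervisor until the event channel fires, using the helpers
 * above:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!lock_is_free(lock)) {
 *		xen_poll_irq(irq);
 *		if (xen_test_irq_pending(irq))
 *			xen_clear_irq_pending(irq);
 *	}
 */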
815
816 void xen_irq_resume(void)
817 {
818         unsigned int cpu, irq, evtchn;
819
820         init_evtchn_cpu_bindings();
821
822         /* New event-channel space is not 'live' yet. */
823         for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
824                 mask_evtchn(evtchn);
825
826         /* No IRQ <-> event-channel mappings. */
827         for (irq = 0; irq < nr_irqs; irq++)
828                 irq_info[irq].evtchn = 0; /* zap event-channel binding */
829
830         for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
831                 evtchn_to_irq[evtchn] = -1;
832
833         for_each_possible_cpu(cpu) {
834                 restore_cpu_virqs(cpu);
835                 restore_cpu_ipis(cpu);
836         }
837 }
838
839 static struct irq_chip xen_dynamic_chip __read_mostly = {
840         .name           = "xen-dyn",
841
842         .disable        = disable_dynirq,
843         .mask           = disable_dynirq,
844         .unmask         = enable_dynirq,
845
846         .ack            = ack_dynirq,
847         .set_affinity   = set_affinity_irq,
848         .retrigger      = retrigger_dynirq,
849 };
850
851 void __init xen_init_IRQ(void)
852 {
853         int i;
854         size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
855
856         cpu_evtchn_mask_p = alloc_bootmem(size);
857         BUG_ON(cpu_evtchn_mask_p == NULL);
858
859         init_evtchn_cpu_bindings();
860
861         /* No event channels are 'live' right now. */
862         for (i = 0; i < NR_EVENT_CHANNELS; i++)
863                 mask_evtchn(i);
864
865         /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
866         for (i = 0; i < nr_irqs; i++)
867                 irq_bindcount[i] = 0;
868
869         irq_ctx_init(smp_processor_id());
870 }