/*
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular port.  They are:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types.  (IRQT_UNBOUND must be 0 so that the statically
 * zero-initialised irq_info[] entries below start out unbound.) */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 */
struct irq_info {
        enum xen_irq_type type;	/* type */
        unsigned short evtchn;	/* event channel */
        unsigned short cpu;	/* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short gsi;
                        unsigned short vector;
                } pirq;
        } u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
};
struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
        return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
        return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
                        .cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
                        .cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
                                    unsigned short gsi, unsigned short vector)
{
        return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
        return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);
        return ret;
}

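/*
 * One word of the set of events that are "active" on @cpu: pending in
 * the shared page, not globally masked, and routed to @cpu by the
 * per-cpu bitmask maintained in bind_evtchn_to_cpu().
 */
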
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

        __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
        __set_bit(chn, cpu_evtchn_mask(cpu));

        irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        struct irq_desc *desc;
        int i;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                cpumask_copy(desc->affinity, cpumask_of(0));
        }
#endif

        /* Note: sizeof(struct cpu_evtchn_s), not sizeof the returned
         * pointer, so that the whole of cpu 0's mask is set. */
        memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

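/*
 * Usage sketch (not from this file): a frontend that saved the irq
 * returned by bind_evtchn_to_irqhandler() can kick its backend after
 * queueing work.  'queue_request', 'ring' and 'ring_irq' are
 * illustrative names only:
 *
 *	queue_request(ring);
 *	notify_remote_via_irq(ring_irq);
 */
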
static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int find_unbound_irq(void)
{
        int irq;
        struct irq_desc *desc;

        for (irq = 0; irq < nr_irqs; irq++) {
                desc = irq_to_desc(irq);
                /* only 0->15 have init'd desc; handle irq > 16 */
                if (desc == NULL)
                        break;
                if (desc->chip == &no_irq_chip)
                        break;
                if (desc->chip != &xen_dynamic_chip)
                        continue;
                if (irq_info[irq].type == IRQT_UNBOUND)
                        break;
        }
        if (irq == nr_irqs)
                panic("No available IRQ to bind to: increase nr_irqs!\n");
        desc = irq_to_desc_alloc_node(irq, 0);
        if (WARN_ON(desc == NULL))
                return -1;
        dynamic_irq_init_keep_chip_data(irq);
        return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        if (irq_info[irq].type != IRQT_UNBOUND) {
                irq_info[irq] = mk_unbound_info();
                dynamic_irq_cleanup(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

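/*
 * Usage sketch (not from this file): a typical frontend binds the event
 * channel its backend granted it, and tears the binding down again on
 * disconnect.  'info', 'demo_interrupt' and the "demo" name are
 * illustrative only:
 *
 *	irq = bind_evtchn_to_irqhandler(info->evtchn, demo_interrupt,
 *					0, "demo", info);
 *	if (irq < 0)
 *		goto fail;
 *	info->irq = irq;
 *	...
 *	unbind_from_irqhandler(info->irq, info);
 */
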
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

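/*
 * Usage sketch (not from this file): per-cpu virtual IRQs such as the
 * Xen timer are bound roughly like this; 'xen_timer_interrupt' and the
 * exact flags are illustrative:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED | IRQF_PERCPU,
 *				      "timer", NULL);
 */
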
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long flags;
        int i;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
                       v->evtchn_upcall_pending,
                       v->evtchn_pending_sel);
        }

        printk("pending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n   " : " ");
        printk("\nmasks:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nunmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */

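/*
 * For example, with BITS_PER_LONG == 64, pending event 70 is bit
 * 70 % 64 = 6 of word 70 / 64 = 1 in evtchn_pending[], and bit 1 of the
 * per-vcpu selector evtchn_pending_sel.  The loop below recovers
 * port = word_idx * BITS_PER_LONG + bit_idx = 1 * 64 + 6 = 70.
 */
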
static void __xen_evtchn_do_upcall(void)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(xed_nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];
                                struct irq_desc *desc;

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(xed_nesting_count);
                __get_cpu_var(xed_nesting_count) = 0;
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_evtchn_info(evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        /* events delivered via platform PCI interrupts are always
         * routed to vcpu 0 */
        if (!VALID_EVTCHN(evtchn) ||
            (xen_hvm_domain() && !xen_have_vector_callback))
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(irq, tcpu);
}

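/*
 * Re-injection works by setting the pending bit while the channel is
 * temporarily masked, then unmasking: unmask_evtchn() notices the
 * pending bit and raises a new upcall, so the event takes the normal
 * delivery path.  If the channel was already masked, the pending bit
 * is simply left for the eventual unmask to deliver.
 */
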
int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = 0;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}

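/*
 * Usage sketch (not from this file): a blocking wait on an event, in
 * the style of the pv spinlock slow path.  'lock_irq' is illustrative:
 *
 *	xen_clear_irq_pending(lock_irq);
 *	... publish that we are waiting, re-check the lock ...
 *	xen_poll_irq(lock_irq);	 blocks in SCHEDOP_poll until pending
 */
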
void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name		= "xen-dyn",

        .disable	= disable_dynirq,
        .mask		= disable_dynirq,
        .unmask		= enable_dynirq,

        .ack		= ack_dynirq,
        .set_affinity	= set_affinity_irq,
        .retrigger	= retrigger_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;

        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                               " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                       "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i;

        cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
                                    GFP_KERNEL);
        BUG_ON(cpu_evtchn_mask_p == NULL);

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
        } else {
                irq_ctx_init(smp_processor_id());
        }
}