/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular cpu:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;
static int nr_pirqs;

static int *evtchn_to_irq;

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

/* Events that are pending and bound to this cpu, minus any that are masked. */
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Size of the whole per-cpu bitmap, not of the pointer returned
	 * by cpu_evtchn_mask(). */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

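/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that keeps the irq returned by bind_evtchn_to_irqhandler() in a
 * hypothetical info->irq can kick the remote end with:
 *
 *	notify_remote_via_irq(info->irq);
 *
 * The evtchn lookup and VALID_EVTCHN() check above make this safe even
 * while the connection is being torn down.
 */
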
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

/* Callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
 * succeeded; otherwise nr_pirqs won't hold the right value. */
static int find_unbound_pirq(void)
{
	int i;

	for (i = nr_pirqs-1; i >= 0; i--) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}

static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it.*/
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have an init'd desc; handle irqs above that */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in which case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_pirqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
		       pirq > nr_pirqs ? "nr_pirqs" : "",
		       gsi > nr_irqs ? "nr_irqs" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed).
	 * Therefore we rely on !xen_initial_domain() to take this branch. */
	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
				xen_pv_domain())) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		dynamic_irq_cleanup(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq)
{
	spin_lock(&irq_mapping_update_lock);

	*irq = find_unbound_irq();
	if (*irq == -1)
		goto out;

	*pirq = find_unbound_pirq();
	if (*pirq == -1)
		goto out;

	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
	pirq_to_irq[*pirq] = *irq;

out:
	spin_unlock(&irq_mapping_update_lock);
}

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		/* The unmap hypercall takes the pirq number, not the gsi. */
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}
	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

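/*
 * Usage sketch (illustrative; info, my_handler and the xenstore read
 * are hypothetical): a frontend that has learned the remote event
 * channel number would typically do
 *
 *	int err = bind_evtchn_to_irqhandler(info->evtchn, my_handler,
 *					    0, "my-frontend", info);
 *	if (err < 0)
 *		return err;
 *	info->irq = err;	(a non-negative return is the bound irq)
 *
 * and later tear it down with unbind_from_irqhandler(info->irq, info).
 */
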
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

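/*
 * Example (sketch): the per-cpu Xen clockevent is wired up roughly like
 * this (handler name and flags paraphrased from arch/x86/xen/time.c):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED|IRQF_PERCPU|IRQF_TIMER,
 *				      "timer", NULL);
 */
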
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
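/*
 * Worked example: with BITS_PER_LONG == 64, a pending event on port 130
 * sets bit 2 of the vcpu's evtchn_pending_sel (130 / 64 == 2) and bit 2
 * of evtchn_pending[2] (130 % 64 == 2); the loops below recover the
 * port as word_idx * BITS_PER_LONG + bit_idx == 2*64 + 2 == 130.
 */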
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

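/*
 * Note on callers (summary added here, believed accurate for this
 * kernel): PV guests enter via xen_evtchn_do_upcall() from the
 * hypervisor event upcall; PVHVM guests with working vector callbacks
 * arrive the same way through the vector installed by
 * xen_callback_vector() below, while xen_hvm_evtchn_do_upcall() is
 * called from the xen-platform-pci interrupt handler otherwise.
 */
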
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	/* Mark the port pending while it is masked, then unmask it so the
	 * event is delivered exactly once (skip the unmask if someone else
	 * had already masked the port). */
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

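/*
 * Poll-usage sketch (assumed caller, e.g. a paravirtual spinlock slow
 * path; names hypothetical):
 *
 *	xen_clear_irq_pending(irq);
 *	... recheck the condition being waited on ...
 *	xen_poll_irq(irq);	- blocks in Xen until the port is kicked
 *
 * Because the irq is normally disabled, the wakeup arrives as a pending
 * event rather than as a delivered interrupt.
 */
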
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i, rc;
	struct physdev_nr_pirqs op_nr_pirqs;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
	if (rc < 0) {
		nr_pirqs = nr_irqs;
		if (rc != -ENOSYS)
			printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
	} else {
		if (xen_pv_domain() && !xen_initial_domain())
			nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
		else
			nr_pirqs = op_nr_pirqs.nr_pirqs;
	}
	pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_pirqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}