/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular cpu:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

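/*
 * Illustrative usage sketch (handler name, device pointer and error
 * handling are hypothetical, not part of this file): a frontend driver
 * binds the event channel it negotiated with the other end, then uses
 * the returned irq like any other interrupt.
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"mydev", mydev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	notify_remote_via_irq(irq);		poke the remote end
 *	...
 *	unbind_from_irqhandler(irq, mydev);	also drops the binding
 */
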
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
        unsigned short evtchn;
        unsigned char index;
        unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
};

/* Per-cpu record of which event channels are bound to which cpu. */
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
        return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq].type;
}

static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

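/*
 * Worked example with illustrative values: for a word where
 * evtchn_pending = 0b1011, this cpu's cpu_evtchn_mask = 0b1001 and
 * evtchn_mask = 0b0001, the result is 0b1011 & 0b1001 & ~0b0001 =
 * 0b1000: only events which are pending, bound to this cpu, and not
 * masked are reported.
 */
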
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
#endif

        __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
        __set_bit(chn, cpu_evtchn_mask[cpu]);

        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        struct irq_desc *desc;
        int i;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                desc->affinity = cpumask_of_cpu(0);
        }
#endif

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int find_unbound_irq(void)
{
        int irq;
        struct irq_desc *desc;

        /* Only allocate from dynirq range */
        for (irq = 0; irq < nr_irqs; irq++)
                if (irq_bindcount[irq] == 0)
                        break;

        if (irq == nr_irqs)
                panic("No available IRQ to bind to: increase nr_irqs!\n");

        desc = irq_to_desc_alloc_cpu(irq, 0);
        if (WARN_ON(desc == NULL))
                return -1;

        return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                dynamic_irq_cleanup(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

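/*
 * Illustrative sketch (handler name and flags are hypothetical, not
 * part of this file): a per-cpu timer source would bind VIRQ_TIMER on
 * each cpu roughly like this:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_handler,
 *				      IRQF_DISABLED|IRQF_PERCPU,
 *				      "timer", NULL);
 */
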
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long flags;
        int i;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
                       (get_irq_regs() && i == cpu)
                               ? xen_irqs_disabled(get_irq_regs())
                               : v->evtchn_upcall_mask,
                       v->evtchn_upcall_pending,
                       v->evtchn_pending_sel);
        }

        printk("pending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nmasks:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nunmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];

                                if (irq != -1)
                                        xen_do_IRQ(irq, regs);
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(nesting_count);
                __get_cpu_var(nesting_count) = 0;
        } while (count != 1);

out:
        put_cpu();
}

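/*
 * Indexing example: port = word_idx * BITS_PER_LONG + bit_idx, so on a
 * 64-bit build a pending bit 5 in selector word 2 corresponds to event
 * channel port 2*64 + 5 = 133.
 */
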
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so the bindcount should be non-0 */
        BUG_ON(irq_bindcount[irq] == 0);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of_cpu(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to the other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_VIRQ);
                BUG_ON(irq_info[irq].index != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_IPI);
                BUG_ON(irq_info[irq].index != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = 0;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}

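/*
 * Illustrative polling pattern (the surrounding loop and condition are
 * hypothetical): clear the pending state first, so that a stale event
 * cannot satisfy the poll immediately, then block in Xen until the irq
 * pends again.
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition)
 *		xen_poll_irq(irq);
 */
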
void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name		= "xen-dyn",
        .mask		= disable_dynirq,
        .unmask		= enable_dynirq,
        .ack		= ack_dynirq,
        .set_affinity	= set_affinity_irq,
        .retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
        int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < nr_irqs; i++)
                irq_bindcount[i] = 0;

        irq_ctx_init(smp_processor_id());
}