/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "vfio_pci_private.h"
/* IRQfd - generic */
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};
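/*
 * A virqfd ties an eventfd to VFIO actions: "handler" is called from
 * the wait-queue wake-up callback (atomic context); if it is absent or
 * returns nonzero, the "inject" work item is scheduled so that
 * "thread" can run later in process context.
 */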
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP)
		/* The eventfd is closing, detach from VFIO */
		virqfd_deactivate(virqfd);

	return 0;
}
static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}
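/*
 * Teardown runs on the cleanup workqueue: first detach from the
 * eventfd wait queue so no new wake-ups arrive, then flush any
 * in-flight inject work, and only then drop the eventfd reference
 * and free the virqfd.
 */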
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	struct virqfd **pvirqfd = virqfd->pvirqfd;
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
	*pvirqfd = NULL;
}
static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}
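/*
 * Wire an eventfd file descriptor into the virqfd machinery: take a
 * reference on the eventfd, register our wake-up callback on its wait
 * queue via poll, and catch any event that was already pending.
 */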
static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	if (*pvirqfd)
		return -EBUSY;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	*pvirqfd = virqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);
	*pvirqfd = NULL;

	return ret;
}
static void virqfd_disable(struct virqfd *virqfd)
{
	if (!virqfd)
		return;

	virqfd_deactivate(virqfd);

	/* Block until we know all outstanding shutdown jobs have completed. */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
/* INTx */
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}
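/*
 * For PCI 2.3 devices the IRQ line may be shared, so the handler only
 * claims the interrupt when this device's INTx status bit is actually
 * set and can be masked via the command register; pre-2.3 devices get
 * an exclusive line and are masked by disabling the host IRQ instead.
 */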
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;
	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
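/*
 * Swap the eventfd that backs INTx delivery: tear down any existing
 * trigger and IRQ registration, then (for fd >= 0) take a reference
 * on the new eventfd and request the IRQ with it as the trigger.
 */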
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[0].trigger = trigger;

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev->ctx[0].unmask);
	virqfd_disable(vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/* MSI/MSI-X */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
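/*
 * Worked example for the msi_qmax computation above (illustrative):
 * with nvec = 3, fls(3 * 2 - 1) - 1 = fls(5) - 1 = 3 - 1 = 2, so the
 * emulated MSI capability reports 2^2 = 4 vectors, the smallest power
 * of two covering the 3 vectors actually enabled.
 */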
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}
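/*
 * Set signaling eventfds for a contiguous block of vectors.  On any
 * failure, unwind by disabling every vector configured so far so the
 * block is all-or-nothing.
 */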
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev->ctx[i].unmask);
		virqfd_disable(vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/* IOCTL support */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev->ctx[0].unmask);
	}

	return 0;
}
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}
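/*
 * MSI/MSI-X trigger handling mirrors INTx: an eventfd array attaches
 * or detaches signaling per vector, while DATA_NONE/DATA_BOOL requests
 * let userspace fire an already-configured vector by signaling its
 * eventfd directly (e.g. to test interrupt delivery).
 */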
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}
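/*
 * Entry point for VFIO_DEVICE_SET_IRQS: select a handler from the
 * (index, action) pair and hand it the unvalidated range and data;
 * each handler performs its own bounds checking.
 */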
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
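/*
 * Userspace usage sketch (illustrative, not part of this file): a
 * caller wires an eventfd to INTx by filling struct vfio_irq_set with
 * flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER,
 * index = VFIO_PCI_INTX_IRQ_INDEX, start = 0, count = 1, placing the
 * eventfd descriptor in data[], and issuing the VFIO_DEVICE_SET_IRQS
 * ioctl on the device fd, which lands in the dispatcher above.
 */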