/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
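/*
 * Example (hypothetical IDs): bind two devices at module load time, or add
 * one later through the PCI driver core's standard sysfs new_id interface:
 *
 *   modprobe vfio-pci ids=8086:10d3,10de:13ba:10de:1097
 *   echo "8086 10d3" > /sys/bus/pci/drivers/vfio-pci/new_id
 *
 * Fields omitted from an entry default to PCI_ANY_ID for subvendor and
 * subdevice and to 0 for class/class_mask; see vfio_pci_fill_ids() below.
 */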
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}
/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
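/*
 * For instance, with VFIO VGA support disabled and a single GPU behind a
 * root port, the loop above finds no other VGA device in the
 * pdev->bus->number ... max_busnr window, so the legacy bits stay out of
 * "decodes" and the arbiter may route the legacy VGA ranges away from
 * this device entirely.
 */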
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR.  And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
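/*
 * Worked example for the sub-page case above (assuming 4K pages): a BAR of
 * size 0x100 at 0xe0000000 is page aligned, so the dummy resource claims
 * 0xe0000100-0xe0000fff (res->end + 1 through res->start + PAGE_SIZE - 1).
 * That keeps a later hot-added device from being assigned into the same
 * page, so the whole page remains safe to hand out via mmap.
 */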
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710) 10/20/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x1589:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}
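/*
 * With nointx set, vfio_pci_get_irq_count() below reports zero INTx
 * interrupts, and the config space virtualization exposes
 * PCI_INTERRUPT_PIN as zero, so userspace never attempts INTx on an
 * affected device while DisINTx stays set on the host.
 */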
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}
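/*
 * Example of the MSI-X geometry captured above: a device reporting
 * PCI_MSIX_FLAGS_QSIZE = 7 has 8 table entries of 16 bytes each, so
 * msix_size = 128.  With PCI_MSIX_TABLE = 0x00002004, the table lives in
 * BAR 4 (PCI_MSIX_TABLE_BIR) at offset 0x2000 (PCI_MSIX_TABLE_OFFSET).
 * These values drive the mmap exclusion and sparse capability below.
 */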
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
			return 1;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
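/*
 * The MSI count above decodes the Multiple Message Capable field:
 * PCI_MSI_FLAGS_QMASK covers bits 3:1 of the flags and holds a
 * power-of-two exponent.  For example, flags = 0x0086 gives
 * (0x0086 & 0x000e) >> 1 = 3, i.e. 1 << 3 = 8 supported vectors.
 */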
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}
struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
				struct vfio_info_cap *caps)
{
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t end, size;
	int nr_areas = 2, i = 0, ret;

	end = pci_resource_len(vdev->pdev, vdev->msix_bar);

	/* If MSI-X table is aligned to the start or end, only one area */
	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
		nr_areas = 1;

	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

	sparse = kzalloc(size, GFP_KERNEL);
	if (!sparse)
		return -ENOMEM;

	sparse->nr_areas = nr_areas;

	if (vdev->msix_offset & PAGE_MASK) {
		sparse->areas[i].offset = 0;
		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
		i++;
	}

	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
						     vdev->msix_size);
		sparse->areas[i].size = end - sparse->areas[i].offset;
		i++;
	}

	ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
				       sparse);
	kfree(sparse);

	return ret;
}
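/*
 * Example of the sparse layout computed above (4K pages): for a 16K BAR
 * with msix_offset = 0x2000 and msix_size = 128, neither alignment test
 * fires, so two areas are reported: [0, 0x2000) before the table and
 * [0x3000, 0x4000) starting at PAGE_ALIGN(0x2000 + 128), leaving the page
 * containing the MSI-X table unmappable.
 */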
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
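/*
 * Device-specific regions registered here appear to userspace after the
 * fixed indexes, e.g. the first one is reported at index
 * VFIO_PCI_NUM_REGIONS + 0; vfio_pci_ioctl() and vfio_pci_rw() below
 * translate that index back into a vdev->region[] slot.
 */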
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_sparse_mmap_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type;

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps,
						      VFIO_REGION_INFO_CAP_TYPE,
						      &cap_type);
			if (ret)
				return ret;
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;
	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;
	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}
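/*
 * Typical userspace flow for the two hot reset ioctls above (sketch):
 * call VFIO_DEVICE_GET_PCI_HOT_RESET_INFO with argsz = sizeof(hdr) to
 * learn how many dependent devices exist, retry with room for that many
 * vfio_pci_dependent_device entries, open the group fd for each reported
 * group_id, then issue VFIO_DEVICE_PCI_HOT_RESET passing those fds.
 */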
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
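/*
 * Example of the offset encoding used above: with VFIO_PCI_OFFSET_SHIFT
 * of 40 (vfio_pci_private.h) and 4K pages, a user mmap of BAR 2 at BAR
 * offset 0x1000 passes file offset (2ULL << 40) + 0x1000, giving
 * vm_pgoff = (2 << 28) + 1; the shift above recovers index = 2 and the
 * mask recovers pgoff = 1.
 */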
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};
struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}
/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}
module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);