/*
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
#include "pci.h"

#define VIRTFN_ID_LEN	16

int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;

	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * vf_id) >> 8);
}

int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;

	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * vf_id) & 0xff;
}

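/*
 * Worked example (hypothetical values): for a PF at bus 0x03, devfn 0x00
 * with First VF Offset 0x01 and VF Stride 0x01, VF 7 has routing ID
 * 0x00 + 0x01 + 0x01 * 7 = 0x08, i.e. bus 0x03, devfn 0x08.  If the sum
 * overflows past 0xff, the carry selects a higher bus number, which is
 * exactly what pci_iov_virtfn_bus() computes above.
 */
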
/*
 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
 * change when NumVFs changes.
 *
 * Update iov->offset and iov->stride when NumVFs is written.
 */
static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
{
	struct pci_sriov *iov = dev->sriov;

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
}

/*
 * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
 * determine how many additional bus numbers will be consumed by VFs.
 *
 * Iterate over all valid NumVFs, validate offset and stride, and calculate
 * the maximum number of bus numbers that could ever be required.
 */
static int compute_max_vf_buses(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int nr_virtfn, busnr, rc = 0;

	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
		pci_iov_set_numvfs(dev, nr_virtfn);
		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
			rc = -EIO;
			goto out;
		}

		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
		if (busnr > iov->max_VF_buses)
			iov->max_VF_buses = busnr;
	}

out:
	pci_iov_set_numvfs(dev, 0);
	return rc;
}

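/*
 * Illustrative sketch (assumed numbers): with TotalVFs = 64, First VF
 * Offset 0x80 and VF Stride 0x2 at every NumVFs value, the last VF's
 * routing ID is devfn + 0x80 + 0x2 * 63 = devfn + 0xfe, so for a PF at
 * devfn 0x00 all VFs still fit on the PF's bus; a stride of 0x4 would
 * push the tail VFs onto the next bus and raise max_VF_buses.
 */
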
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);

	return child;
}

static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
	if (physbus != virtbus && list_empty(&virtbus->devices))
		pci_remove_bus(virtbus);
}

resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}

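/*
 * For example (hypothetical sizes): if the PF's VF BAR0 aperture is 2 MB
 * and TotalVFs is 16, this reports the 128 KB that each individual VF
 * decodes; VF i's BAR0 starts 128 KB * i into the PF's aperture (see
 * pci_iov_add_virtfn() below).
 */
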
int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc = -ENOMEM;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;
	struct pci_bus *bus;

	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
	if (!bus)
		goto failed;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		goto failed0;

	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	rc = pci_setup_device(virtfn);
	if (rc)
		goto failed0;

	virtfn->dev.parent = dev->dev.parent;
	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;
	virtfn->multifunction = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		if (rc)
			goto failed1;
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);

	pci_bus_add_device(virtfn);
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	pci_stop_and_remove_bus_device(virtfn);
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:
	return rc;
}

void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     pci_iov_virtfn_bus(dev, id),
					     pci_iov_virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn->bus);

	/* balance pci_get_domain_bus_and_slot() */
	pci_dev_put(virtfn);
	pci_dev_put(dev);
}

int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	return 0;
}

int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
	return 0;
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i;
	int nres;
	u16 initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;
	int bus;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
	if (bus > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
			nr_virtfn, bus, &dev->bus->busn_res);
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENOSYS;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	rc = pcibios_sriov_enable(dev, initial);
	if (rc) {
		dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc);
		goto err_pcibios;
	}

	pci_iov_set_numvfs(dev, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);	/* per SR-IOV spec, wait 100 ms for VFs to be ready */
	pci_cfg_access_unlock(dev);

	for (i = 0; i < initial; i++) {
		rc = pci_iov_add_virtfn(dev, i, 0);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	while (i--)
		pci_iov_remove_virtfn(dev, i, 0);

	pcibios_sriov_disable(dev);
err_pcibios:
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	pci_iov_set_numvfs(dev, 0);
	return rc;
}

static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	for (i = 0; i < iov->num_VFs; i++)
		pci_iov_remove_virtfn(dev, i, 0);

	pcibios_sriov_disable(dev);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_iov_set_numvfs(dev, 0);
}

static int sriov_init(struct pci_dev *dev, int pos)
{
	int i, bar64;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		/*
		 * If it is already FIXED, don't change it, something
		 * (perhaps EA or header fixups) wants it this way.
		 */
		if (res->flags & IORESOURCE_PCI_FIXED)
			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
		else
			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
						pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		iov->barsz[i] = resource_size(res);
		res->end = res->start + resource_size(res) * total - 1;
		dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
			 i, res, i, total);
		i += bar64;
		nres++;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;
	rc = compute_max_vf_buses(dev);
	if (rc)
		goto fail_max_buses;

	return 0;

fail_max_buses:
	dev->sriov = NULL;
	dev->is_physfn = 0;
failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
	}

	kfree(iov);
	return rc;
}

static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	mutex_destroy(&dev->sriov->lock);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_iov_set_numvfs(dev, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

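/*
 * Note: this runs on the PCI state-restore path (via pci_restore_iov_state()
 * below), e.g. after a reset or resume, so the VF BARs, system page size,
 * NumVFs and VF Enable programmed by sriov_enable() are re-established in
 * that order before the VFs are used again.
 */
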
/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_update_resource - update a VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Update a VF BAR in the SR-IOV capability of a PF.
 */
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
	struct resource *res = dev->resource + resno;
	int vf_bar = resno - PCI_IOV_RESOURCES;
	struct pci_bus_region region;
	u16 cmd;
	u32 new;
	int reg;

	/*
	 * The generic pci_restore_bars() path calls this for all devices,
	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
	 * have nothing to do.
	 */
	if (!iov)
		return;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
			 vf_bar, res);
		return;
	}

	/*
	 * Ignore unimplemented BARs, unused resource slots for 64-bit
	 * BARs, and non-movable resources, e.g., those described via
	 * Enhanced Allocation.
	 */
	if (!res->flags)
		return;

	if (res->flags & IORESOURCE_UNSET)
		return;

	if (res->flags & IORESOURCE_PCI_FIXED)
		return;

	pcibios_resource_to_bus(dev->bus, &region, res);
	new = region.start;
	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;

	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
	pci_write_config_dword(dev, reg, new);
	if (res->flags & IORESOURCE_MEM_64) {
		new = region.start >> 16 >> 16;
		pci_write_config_dword(dev, reg + 4, new);
	}
}

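/*
 * Worked example (made-up address): for a 64-bit VF BAR with bus address
 * 0x2_4000_0000, the low dword written above is 0x4000_0000 (plus the flag
 * bits preserved from res->flags) and the high dword written to reg + 4 is
 * 0x2; "region.start >> 16 >> 16" is the usual idiom for a shift by 32 that
 * is also safe when resource_size_t is 32 bits wide.
 */
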
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
						      int resno)
{
	return pci_iov_resource_size(dev, resno);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs. The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	return pcibios_iov_resource_alignment(dev, resno);
}

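/*
 * Example (hypothetical numbers): a 128 KB VF BAR with TotalVFs = 16 makes
 * the PF resource 2 MB (the resource size), but the allocation only needs
 * 128 KB alignment, which is what the default implementation above returns.
 */
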
/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns the max number of buses (excluding the current one) used by
 * Virtual Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		if (dev->sriov->max_VF_buses > max)
			max = dev->sriov->max_VF_buses;
	}

	return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENOSYS;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

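/*
 * Typical usage from a PF driver (illustrative sketch only; the foo_* names
 * are hypothetical and not part of this file):
 *
 *	static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		int rc;
 *
 *		if (num_vfs == 0) {
 *			pci_disable_sriov(pdev);
 *			return 0;
 *		}
 *
 *		rc = pci_enable_sriov(pdev, num_vfs);
 *		return rc ? rc : num_vfs;
 *	}
 *
 * wired up through the driver's .sriov_configure callback, which the
 * sriov_numvfs sysfs attribute invokes.
 */
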
/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a guest.
 * If device is not a physical function returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs, the vendor ID will be the
	 * same as the PF so there is no need to check for that one
	 */
	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    pci_is_dev_assigned(vfdev))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

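/*
 * A PF driver would typically consult this before tearing down SR-IOV,
 * e.g. (illustrative only):
 *
 *	if (pci_vfs_assigned(pdev))
 *		return -EBUSY;
 *	pci_disable_sriov(pdev);
 */
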
/**
 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from PF driver's probe routine with
 * device's mutex held.
 *
 * Returns 0 if PF is an SRIOV-capable device and the value of numvfs is
 * valid.  If not a PF return -ENOSYS; if numvfs is invalid return -EINVAL;
 * if VFs already enabled, return -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn)
		return -ENOSYS;
	if (numvfs > dev->sriov->total_VFs)
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;

	dev->sriov->driver_max_VFs = numvfs;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

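/*
 * Illustrative use from a PF driver's probe routine (hypothetical limit):
 * a device whose firmware only provisions resources for 8 VFs can advertise
 * that by calling pci_sriov_set_totalvfs(pdev, 8), so the sriov_totalvfs
 * sysfs attribute and pci_sriov_get_totalvfs() report 8 instead of the
 * capability's TotalVFs.
 */
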
/**
 * pci_sriov_get_totalvfs -- get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SRIOV support, return the PCIe
 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
 * if the driver reduced it. Otherwise 0.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	if (dev->sriov->driver_max_VFs)
		return dev->sriov->driver_max_VFs;

	return dev->sriov->total_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);