2 * VFIO PCI config space virtualization
4 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * Derived from original vfio:
12 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
13 * Author: Tom Lyon, pugs@cisco.com
17 * This code handles reading and writing of PCI configuration registers.
18 * This is hairy because we want to allow a lot of flexibility to the
19 * user driver, but cannot trust it with all of the config fields.
20 * Tables determine which fields can be read and written, as well as
21 * which fields are 'virtualized' - special actions and translations to
22 * make it appear to the user that he has control, when in fact things
23 * must be negotiated with the underlying OS.
27 #include <linux/pci.h>
28 #include <linux/uaccess.h>
29 #include <linux/vfio.h>
30 #include <linux/slab.h>
32 #include "vfio_pci_private.h"
34 #define PCI_CFG_SPACE_SIZE 256
36 /* Fake capability ID for standard config space */
37 #define PCI_CAP_ID_BASIC 0
39 #define is_bar(offset) \
40 ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
41 (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
44 * Lengths of PCI Config Capabilities
45 * 0: Removed from the user visible capability list
48 static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
49 [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */
50 [PCI_CAP_ID_PM] = PCI_PM_SIZEOF,
51 [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF,
52 [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF,
53 [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */
54 [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */
55 [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */
56 [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */
57 [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */
58 [PCI_CAP_ID_VNDR] = 0xFF, /* variable */
59 [PCI_CAP_ID_DBG] = 0, /* debug - don't care */
60 [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */
61 [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */
62 [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */
63 [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */
64 [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */
65 [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */
66 [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF,
67 [PCI_CAP_ID_SATA] = 0xFF,
68 [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF,
72 * Lengths of PCIe/PCI-X Extended Config Capabilities
73 * 0: Removed or masked from the user visible capability list
76 static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
77 [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND,
78 [PCI_EXT_CAP_ID_VC] = 0xFF,
79 [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF,
80 [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF,
81 [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */
82 [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */
83 [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */
84 [PCI_EXT_CAP_ID_MFVC] = 0xFF,
85 [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */
86 [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */
87 [PCI_EXT_CAP_ID_VNDR] = 0xFF,
88 [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */
89 [PCI_EXT_CAP_ID_ACS] = 0xFF,
90 [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF,
91 [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF,
92 [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF,
93 [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */
94 [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
95 [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF,
96 [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */
97 [PCI_EXT_CAP_ID_REBAR] = 0xFF,
98 [PCI_EXT_CAP_ID_DPA] = 0xFF,
99 [PCI_EXT_CAP_ID_TPH] = 0xFF,
100 [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF,
101 [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */
102 [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */
103 [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */
107 * Read/Write Permission Bits - one bit for each bit in capability
108 * Any field can be read if it exists, but what is read depends on
109 * whether the field is 'virtualized' or just passed through to the
110 * hardware. Any virtualized field is also virtualized for writes.
111 * Writes are only permitted if they have a 1 bit here.
114 u8 *virt; /* read/write virtual data, not hw */
115 u8 *write; /* writeable bits */
116 int (*readfn)(struct vfio_pci_device *vdev, int pos, int count,
117 struct perm_bits *perm, int offset, __le32 *val);
118 int (*writefn)(struct vfio_pci_device *vdev, int pos, int count,
119 struct perm_bits *perm, int offset, __le32 val);
123 #define ALL_VIRT 0xFFFFFFFFU
125 #define ALL_WRITE 0xFFFFFFFFU
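/*
 * A minimal sketch (not part of the original driver) of how the virt/write
 * masks above partition a user write, mirroring vfio_default_config_write()
 * below: writable virtualized bits land in the emulated copy, writable
 * non-virtualized bits are forwarded to the device.  Helper names are
 * hypothetical and the masks are shown as plain u32 for brevity.
 */
static inline u32 example_merge_virt_bits(u32 virt_val, u32 user_val,
					  u32 write, u32 virt)
{
	/* Writable, virtualized bits are taken from the user write */
	return (virt_val & ~(write & virt)) | (user_val & (write & virt));
}

static inline u32 example_merge_phys_bits(u32 phys_val, u32 user_val,
					  u32 write, u32 virt)
{
	/* Writable, non-virtualized bits are forwarded to the device */
	return (phys_val & ~(write & ~virt)) | (user_val & (write & ~virt));
}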
127 static int vfio_user_config_read(struct pci_dev *pdev, int offset,
128 __le32 *val, int count)
137 ret = pci_user_read_config_byte(pdev, offset, &tmp);
144 ret = pci_user_read_config_word(pdev, offset, &tmp);
149 ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
153 *val = cpu_to_le32(tmp_val);
155 return pcibios_err_to_errno(ret);
158 static int vfio_user_config_write(struct pci_dev *pdev, int offset,
159 __le32 val, int count)
162 u32 tmp_val = le32_to_cpu(val);
166 ret = pci_user_write_config_byte(pdev, offset, tmp_val);
169 ret = pci_user_write_config_word(pdev, offset, tmp_val);
172 ret = pci_user_write_config_dword(pdev, offset, tmp_val);
176 return pcibios_err_to_errno(ret);
179 static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
180 int count, struct perm_bits *perm,
181 int offset, __le32 *val)
185 memcpy(val, vdev->vconfig + pos, count);
187 memcpy(&virt, perm->virt + offset, count);
189 /* Any non-virtualized bits? */
190 if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
191 struct pci_dev *pdev = vdev->pdev;
195 ret = vfio_user_config_read(pdev, pos, &phys_val, count);
199 *val = (phys_val & ~virt) | (*val & virt);
205 static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
206 int count, struct perm_bits *perm,
207 int offset, __le32 val)
209 __le32 virt = 0, write = 0;
211 memcpy(&write, perm->write + offset, count);
214 return count; /* drop, no writable bits */
216 memcpy(&virt, perm->virt + offset, count);
218 /* Virtualized and writable bits go to vconfig */
222 memcpy(&virt_val, vdev->vconfig + pos, count);
224 virt_val &= ~(write & virt);
225 virt_val |= (val & (write & virt));
227 memcpy(vdev->vconfig + pos, &virt_val, count);
230 /* Non-virtualized and writable bits go to hardware */
232 struct pci_dev *pdev = vdev->pdev;
236 ret = vfio_user_config_read(pdev, pos, &phys_val, count);
240 phys_val &= ~(write & ~virt);
241 phys_val |= (val & (write & ~virt));
243 ret = vfio_user_config_write(pdev, pos, phys_val, count);
251 /* Allow direct read from hardware, except for capability next pointer */
252 static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
253 int count, struct perm_bits *perm,
254 int offset, __le32 *val)
258 ret = vfio_user_config_read(vdev->pdev, pos, val, count);
260 return pcibios_err_to_errno(ret);
262 if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
264 memcpy(val, vdev->vconfig + pos, count);
265 } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
266 if (offset == PCI_CAP_LIST_ID && count > 1)
267 memcpy(val, vdev->vconfig + pos,
268 min(PCI_CAP_FLAGS, count));
269 else if (offset == PCI_CAP_LIST_NEXT)
270 memcpy(val, vdev->vconfig + pos, 1);
276 /* Raw access skips any kind of virtualization */
277 static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
278 int count, struct perm_bits *perm,
279 int offset, __le32 val)
283 ret = vfio_user_config_write(vdev->pdev, pos, val, count);
290 static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
291 int count, struct perm_bits *perm,
292 int offset, __le32 *val)
296 ret = vfio_user_config_read(vdev->pdev, pos, val, count);
298 return pcibios_err_to_errno(ret);
303 /* Virt access uses only virtualization */
304 static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
305 int count, struct perm_bits *perm,
306 int offset, __le32 val)
308 memcpy(vdev->vconfig + pos, &val, count);
312 static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos,
313 int count, struct perm_bits *perm,
314 int offset, __le32 *val)
316 memcpy(val, vdev->vconfig + pos, count);
320 /* Default capability regions to read-only, no-virtualization */
321 static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
322 [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
324 static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
325 [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
328 * Default unassigned regions to raw read-write access. Some devices
329 * require this to function as they hide registers between the gaps in
330 * config space (be2net). Like MMIO and I/O port registers, we have
331 * to trust the hardware isolation.
333 static struct perm_bits unassigned_perms = {
334 .readfn = vfio_raw_config_read,
335 .writefn = vfio_raw_config_write
338 static struct perm_bits virt_perms = {
339 .readfn = vfio_virt_config_read,
340 .writefn = vfio_virt_config_write
343 static void free_perm_bits(struct perm_bits *perm)
351 static int alloc_perm_bits(struct perm_bits *perm, int size)
354 * Round up all permission bits to the next dword, this lets us
355 * ignore whether a read/write exceeds the defined capability
356 * structure. We can do this because:
357 * - Standard config space is already dword aligned
358 * - Capabilities are all dword aligned (bits 0:1 of next reserved)
359 * - Express capabilities defined as dword aligned
361 size = round_up(size, 4);
365 * - All Readable, None Writeable, None Virtualized
367 perm->virt = kzalloc(size, GFP_KERNEL);
368 perm->write = kzalloc(size, GFP_KERNEL);
369 if (!perm->virt || !perm->write) {
370 free_perm_bits(perm);
374 perm->readfn = vfio_default_config_read;
375 perm->writefn = vfio_default_config_write;
381 * Helper functions for filling in permission tables
383 static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
386 p->write[off] = write;
389 /* Handle endian-ness - pci and tables are little-endian */
390 static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
392 *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
393 *(__le16 *)(&p->write[off]) = cpu_to_le16(write);
396 /* Handle endian-ness - pci and tables are little-endian */
397 static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
399 *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
400 *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
404 * Restore the *real* BARs after we detect a FLR or backdoor reset.
405 * (backdoor = some device specific technique that we didn't catch)
407 static void vfio_bar_restore(struct vfio_pci_device *vdev)
409 struct pci_dev *pdev = vdev->pdev;
410 u32 *rbar = vdev->rbar;
417 pr_info("%s: %s reset recovery - restoring bars\n",
418 __func__, dev_name(&pdev->dev));
420 for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
421 pci_user_write_config_dword(pdev, i, *rbar);
423 pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
426 pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
427 cmd |= PCI_COMMAND_INTX_DISABLE;
428 pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
432 static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
434 unsigned long flags = pci_resource_flags(pdev, bar);
437 if (flags & IORESOURCE_IO)
438 return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);
440 val = PCI_BASE_ADDRESS_SPACE_MEMORY;
442 if (flags & IORESOURCE_PREFETCH)
443 val |= PCI_BASE_ADDRESS_MEM_PREFETCH;
445 if (flags & IORESOURCE_MEM_64)
446 val |= PCI_BASE_ADDRESS_MEM_TYPE_64;
448 return cpu_to_le32(val);
452 * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
453 * to reflect the hardware capabilities. This implements BAR sizing.
455 static void vfio_bar_fixup(struct vfio_pci_device *vdev)
457 struct pci_dev *pdev = vdev->pdev;
462 bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
464 for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
465 if (!pci_resource_start(pdev, i)) {
466 *bar = 0; /* Unmapped by host = unimplemented to user */
470 mask = ~(pci_resource_len(pdev, i) - 1);
472 *bar &= cpu_to_le32((u32)mask);
473 *bar |= vfio_generate_bar_flags(pdev, i);
475 if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
477 *bar &= cpu_to_le32((u32)(mask >> 32));
482 bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
485 * NB. REGION_INFO will have reported zero size if we weren't able
486 * to read the ROM, but we still return the actual BAR size here if
487 * it exists (or the shadow ROM space).
489 if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
490 mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
491 mask |= PCI_ROM_ADDRESS_ENABLE;
492 *bar &= cpu_to_le32((u32)mask);
493 } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
494 IORESOURCE_ROM_SHADOW) {
495 mask = ~(0x20000 - 1);
496 mask |= PCI_ROM_ADDRESS_ENABLE;
497 *bar &= cpu_to_le32((u32)mask);
501 vdev->bardirty = false;
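/*
 * Illustrative userspace sketch (not part of this file): BAR sizing against
 * the virtualized BARs above works just as on bare metal - write all ones,
 * read back the size mask, restore the original value.  A 32-bit memory BAR
 * and a little-endian host are assumed for brevity; 'device_fd' is an open
 * VFIO device fd and the helper name is hypothetical.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/pci_regs.h>

static uint32_t example_bar0_size(int device_fd)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};
	uint32_t orig, ones = 0xffffffff, mask;
	off_t bar0;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		return 0;

	bar0 = info.offset + PCI_BASE_ADDRESS_0;

	pread(device_fd, &orig, 4, bar0);
	pwrite(device_fd, &ones, 4, bar0);
	pread(device_fd, &mask, 4, bar0);
	pwrite(device_fd, &orig, 4, bar0);	/* restore */

	mask &= PCI_BASE_ADDRESS_MEM_MASK;	/* strip the flag bits */
	return mask ? ~mask + 1 : 0;
}
#endif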
504 static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
505 int count, struct perm_bits *perm,
506 int offset, __le32 *val)
508 if (is_bar(offset)) /* pos == offset for basic config */
509 vfio_bar_fixup(vdev);
511 count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
513 /* Mask in virtual memory enable for SR-IOV devices */
514 if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
515 u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
516 u32 tmp_val = le32_to_cpu(*val);
518 tmp_val |= cmd & PCI_COMMAND_MEMORY;
519 *val = cpu_to_le32(tmp_val);
525 /* Test whether BARs match the values we think they should contain */
526 static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
528 int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
531 for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
533 ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
534 if (ret || vdev->rbar[i] != bar)
542 static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
543 int count, struct perm_bits *perm,
544 int offset, __le32 val)
546 struct pci_dev *pdev = vdev->pdev;
551 virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];
553 if (offset == PCI_COMMAND) {
554 bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
557 ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
561 new_cmd = le32_to_cpu(val);
563 phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
564 virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
565 new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
567 phys_io = !!(phys_cmd & PCI_COMMAND_IO);
568 virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
569 new_io = !!(new_cmd & PCI_COMMAND_IO);
572 * If the user is writing mem/io enable (new_mem/io) and we
573 * think it's already enabled (virt_mem/io), but the hardware
574 * shows it disabled (phys_mem/io), then the device has
575 * undergone some kind of backdoor reset and needs to be
576 * restored before we allow it to enable the bars.
577 * SR-IOV devices will trigger this, but we catch them later
579 if ((new_mem && virt_mem && !phys_mem) ||
580 (new_io && virt_io && !phys_io) ||
581 vfio_need_bar_restore(vdev))
582 vfio_bar_restore(vdev);
585 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
590 * Save current memory/io enable bits in vconfig to allow for
591 * the test above next time.
593 if (offset == PCI_COMMAND) {
594 u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
596 *virt_cmd &= cpu_to_le16(~mask);
597 *virt_cmd |= cpu_to_le16(new_cmd & mask);
600 /* Emulate INTx disable */
601 if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
602 bool virt_intx_disable;
604 virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
605 PCI_COMMAND_INTX_DISABLE);
607 if (virt_intx_disable && !vdev->virq_disabled) {
608 vdev->virq_disabled = true;
609 vfio_pci_intx_mask(vdev);
610 } else if (!virt_intx_disable && vdev->virq_disabled) {
611 vdev->virq_disabled = false;
612 vfio_pci_intx_unmask(vdev);
617 vdev->bardirty = true;
622 /* Permissions for the Basic PCI Header */
623 static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
625 if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
628 perm->readfn = vfio_basic_config_read;
629 perm->writefn = vfio_basic_config_write;
631 /* Virtualized for SR-IOV VFs, whose vendor/device IDs read as 0xFFFF */
632 p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
633 p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);
636 * Virtualize INTx disable; we use it internally for interrupt
637 * control and can emulate it for non-PCI 2.3 devices.
639 p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
641 /* Virtualize capability list, we might want to skip/disable */
642 p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);
644 /* No harm to write */
645 p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
646 p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
647 p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);
649 /* Virtualize all bars, can't touch the real ones */
650 p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
651 p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
652 p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
653 p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
654 p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
655 p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
656 p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);
658 /* Allow us to adjust capability chain */
659 p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);
661 /* Sometimes used by sw, just virtualize */
662 p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
664 /* Virtualize interrupt pin to allow hiding INTx */
665 p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
670 static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
671 int count, struct perm_bits *perm,
672 int offset, __le32 val)
674 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
678 if (offset == PCI_PM_CTRL) {
681 switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
696 pci_set_power_state(vdev->pdev, state);
702 /* Permissions for the Power Management capability */
703 static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
705 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
708 perm->writefn = vfio_pm_config_write;
711 * We always virtualize the next field so we can remove
712 * capabilities from the chain if we want to.
714 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
717 * Power management is defined *per function*, so we can let
718 * the user change power state, but we trap and initiate the
719 * change ourselves, so the state bits are read-only.
721 p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
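/*
 * Illustrative userspace sketch (not part of this file): a user requests
 * D3hot by writing the PMCSR state field through the virtualized capability.
 * The state bits are read-only in the write mask above, but
 * vfio_pm_config_write() traps the written value and performs the transition
 * via pci_set_power_state().  'device_fd', 'cfg' (config region offset) and
 * 'pm_cap' (PM capability offset) are hypothetical; little-endian host assumed.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static void example_set_d3hot(int device_fd, off_t cfg, uint8_t pm_cap)
{
	uint16_t pmcsr;

	pread(device_fd, &pmcsr, 2, cfg + pm_cap + PCI_PM_CTRL);
	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3;	/* D3hot */
	pwrite(device_fd, &pmcsr, 2, cfg + pm_cap + PCI_PM_CTRL);
}
#endif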
725 static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
726 int count, struct perm_bits *perm,
727 int offset, __le32 val)
729 struct pci_dev *pdev = vdev->pdev;
730 __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
731 __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
736 * Write through to emulation. If the write includes the upper byte
737 * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we perform the corresponding VPD read or write below.
740 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
741 if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
742 offset + count <= PCI_VPD_ADDR + 1)
745 addr = le16_to_cpu(*paddr);
747 if (addr & PCI_VPD_ADDR_F) {
748 data = le32_to_cpu(*pdata);
749 if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
753 if (pci_read_vpd(pdev, addr, 4, &data) < 0)
755 *pdata = cpu_to_le32(data);
759 * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
760 * signal completion. If an error occurs above, we assume that not
761 * toggling this bit will induce a driver timeout.
763 addr ^= PCI_VPD_ADDR_F;
764 *paddr = cpu_to_le16(addr);
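/*
 * Illustrative userspace sketch (not part of this file): reading a VPD dword
 * through the virtualized address/data registers.  The write above is
 * serviced by pci_read_vpd()/pci_write_vpd() and PCI_VPD_ADDR_F is toggled to
 * signal completion, so the standard polling protocol works unmodified.
 * 'device_fd', 'cfg' (config region offset) and 'vpd_cap' (VPD capability
 * offset) are hypothetical; a little-endian host is assumed.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static int example_vpd_read_dword(int device_fd, off_t cfg, uint8_t vpd_cap,
				  uint16_t vpd_off, uint32_t *data)
{
	uint16_t addr = vpd_off & ~PCI_VPD_ADDR_F;	/* F=0 requests a read */
	int retries = 100;

	pwrite(device_fd, &addr, 2, cfg + vpd_cap + PCI_VPD_ADDR);

	/* The emulation sets PCI_VPD_ADDR_F once the read has completed */
	do {
		pread(device_fd, &addr, 2, cfg + vpd_cap + PCI_VPD_ADDR);
	} while (!(addr & PCI_VPD_ADDR_F) && --retries);

	if (!(addr & PCI_VPD_ADDR_F))
		return -1;

	pread(device_fd, data, 4, cfg + vpd_cap + PCI_VPD_DATA);
	return 0;
}
#endif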
769 /* Permissions for Vital Product Data capability */
770 static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
772 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
775 perm->writefn = vfio_vpd_config_write;
778 * We always virtualize the next field so we can remove
779 * capabilities from the chain if we want to.
781 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
784 * Both the address and data registers are virtualized to
785 * enable access through the pci_vpd_read/write functions
787 p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
788 p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);
793 /* Permissions for PCI-X capability */
794 static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
796 /* Alloc 24, but only 8 are used in v0 */
797 if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
800 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
802 p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
803 p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
807 static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
808 int count, struct perm_bits *perm,
809 int offset, __le32 val)
811 __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
812 offset + PCI_EXP_DEVCTL);
814 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
819 * The FLR bit is virtualized: if it is set and the device supports PCIe
820 * FLR, issue a reset_function. Regardless, clear the bit; the spec
821 * requires it to always read as zero. NB, reset_function might
822 * not use a PCIe FLR, we don't have that level of granularity.
824 if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
828 *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
830 ret = pci_user_read_config_dword(vdev->pdev,
831 pos - offset + PCI_EXP_DEVCAP,
834 if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
835 pci_try_reset_function(vdev->pdev);
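/*
 * Illustrative userspace sketch (not part of this file): setting the
 * Initiate FLR bit through the virtualized Device Control register.  The
 * write is trapped above and translated into pci_try_reset_function();
 * VFIO_DEVICE_RESET remains the more common way to reset a device.
 * 'device_fd', 'cfg' and 'exp_cap' (PCIe capability offset) are hypothetical;
 * little-endian host assumed.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static void example_request_flr(int device_fd, off_t cfg, uint8_t exp_cap)
{
	uint16_t devctl;

	pread(device_fd, &devctl, 2, cfg + exp_cap + PCI_EXP_DEVCTL);
	devctl |= PCI_EXP_DEVCTL_BCR_FLR;
	pwrite(device_fd, &devctl, 2, cfg + exp_cap + PCI_EXP_DEVCTL);
}
#endif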
841 /* Permissions for PCI Express capability */
842 static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
844 /* Alloc larger of two possible sizes */
845 if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
848 perm->writefn = vfio_exp_config_write;
850 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
853 * Allow writes to device control fields, except devctl_phantom,
854 * which could confuse the IOMMU, and the ARI bit in devctl2, which
855 * is set at probe time. FLR gets virtualized via our writefn.
857 p_setw(perm, PCI_EXP_DEVCTL,
858 PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
859 p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
863 static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
864 int count, struct perm_bits *perm,
865 int offset, __le32 val)
867 u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
869 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
874 * The FLR bit is virtualized: if it is set and the device supports AF
875 * FLR, issue a reset_function. Regardless, clear the bit; the spec
876 * requires it to always read as zero. NB, reset_function might
877 * not use an AF FLR, we don't have that level of granularity.
879 if (*ctrl & PCI_AF_CTRL_FLR) {
883 *ctrl &= ~PCI_AF_CTRL_FLR;
885 ret = pci_user_read_config_byte(vdev->pdev,
886 pos - offset + PCI_AF_CAP,
889 if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
890 pci_try_reset_function(vdev->pdev);
896 /* Permissions for the Advanced Features (AF) capability */
897 static int __init init_pci_cap_af_perm(struct perm_bits *perm)
899 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
902 perm->writefn = vfio_af_config_write;
904 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
905 p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
909 /* Permissions for Advanced Error Reporting extended capability */
910 static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
914 if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
918 * Virtualize the first dword of all express capabilities
919 * because it includes the next pointer. This lets us later
920 * remove capabilities from the chain if we need to.
922 p_setd(perm, 0, ALL_VIRT, NO_WRITE);
924 /* Writable bits mask */
925 mask = PCI_ERR_UNC_UND | /* Undefined */
926 PCI_ERR_UNC_DLP | /* Data Link Protocol */
927 PCI_ERR_UNC_SURPDN | /* Surprise Down */
928 PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
929 PCI_ERR_UNC_FCP | /* Flow Control Protocol */
930 PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */
931 PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */
932 PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */
933 PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */
934 PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */
935 PCI_ERR_UNC_ECRC | /* ECRC Error Status */
936 PCI_ERR_UNC_UNSUP | /* Unsupported Request */
937 PCI_ERR_UNC_ACSV | /* ACS Violation */
938 PCI_ERR_UNC_INTN | /* internal error */
939 PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */
940 PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */
941 PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */
942 p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
943 p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
944 p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
946 mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */
947 PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */
948 PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */
949 PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */
950 PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */
951 PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */
952 PCI_ERR_COR_INTERNAL | /* Corrected Internal */
953 PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */
954 p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
955 p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
957 mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */
958 PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */
959 p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
963 /* Permissions for Power Budgeting extended capability */
964 static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
966 if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
969 p_setd(perm, 0, ALL_VIRT, NO_WRITE);
971 /* Writing the data selector is OK, the info is still read-only */
972 p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
977 * Initialize and free the shared permission tables
979 void vfio_pci_uninit_perm_bits(void)
981 free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
983 free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
984 free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
985 free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
986 free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
987 free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
989 free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
990 free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
993 int __init vfio_pci_init_perm_bits(void)
997 /* Basic config space */
998 ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
1001 ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
1002 ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
1003 ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
1004 cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
1005 ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
1006 ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
1008 /* Extended capabilities */
1009 ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
1010 ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
1011 ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
1014 vfio_pci_uninit_perm_bits();
1019 static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
1022 int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
1023 PCI_STD_HEADER_SIZEOF;
1024 cap = vdev->pci_config_map[pos];
1026 if (cap == PCI_CAP_ID_BASIC)
1029 /* XXX Can we have two abutting capabilities of the same type? */
1030 while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
1036 static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
1037 int count, struct perm_bits *perm,
1038 int offset, __le32 *val)
1040 /* Update max available queue size from msi_qmax */
1041 if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
1045 start = vfio_find_cap_start(vdev, pos);
1047 flags = (__le16 *)&vdev->vconfig[start];
1049 *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
1050 *flags |= cpu_to_le16(vdev->msi_qmax << 1);
1053 return vfio_default_config_read(vdev, pos, count, perm, offset, val);
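/*
 * Illustrative userspace sketch (not part of this file): the Multiple
 * Message Capable field the user reads here reflects the virtualized
 * msi_qmax value, and MSI itself is enabled through VFIO_DEVICE_SET_IRQS
 * rather than by writing PCI_MSI_FLAGS_ENABLE, which the write handler below
 * masks off.  'device_fd', 'cfg' and 'msi_cap' (MSI capability offset) are
 * hypothetical; little-endian host assumed.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static int example_msi_vectors(int device_fd, off_t cfg, uint8_t msi_cap)
{
	uint16_t flags;

	pread(device_fd, &flags, 2, cfg + msi_cap + PCI_MSI_FLAGS);

	/* MMC encodes log2 of the number of supported vectors */
	return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
#endif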
1056 static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
1057 int count, struct perm_bits *perm,
1058 int offset, __le32 val)
1060 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
1064 /* Fixup and write configured queue size and enable to hardware */
1065 if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
1070 start = vfio_find_cap_start(vdev, pos);
1072 pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
1074 flags = le16_to_cpu(*pflags);
1076 /* MSI is enabled via ioctl */
1078 flags &= ~PCI_MSI_FLAGS_ENABLE;
1080 /* Check queue size */
1081 if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
1082 flags &= ~PCI_MSI_FLAGS_QSIZE;
1083 flags |= vdev->msi_qmax << 4;
1086 /* Write back to virt and to hardware */
1087 *pflags = cpu_to_le16(flags);
1088 ret = pci_user_write_config_word(vdev->pdev,
1089 start + PCI_MSI_FLAGS,
1092 return pcibios_err_to_errno(ret);
1099 * MSI determination is per-device, so this routine gets used beyond
1100 * initialization time. Don't add __init
1102 static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
1104 if (alloc_perm_bits(perm, len))
1107 perm->readfn = vfio_msi_config_read;
1108 perm->writefn = vfio_msi_config_write;
1110 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
1113 * The upper byte of the control register is reserved,
1114 * just set up the lower byte.
1116 p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
1117 p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
1118 if (flags & PCI_MSI_FLAGS_64BIT) {
1119 p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
1120 p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
1121 if (flags & PCI_MSI_FLAGS_MASKBIT) {
1122 p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
1123 p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
1126 p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
1127 if (flags & PCI_MSI_FLAGS_MASKBIT) {
1128 p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
1129 p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
1135 /* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
1136 static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
1138 struct pci_dev *pdev = vdev->pdev;
1142 ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
1144 return pcibios_err_to_errno(ret);
1146 len = 10; /* Minimum size */
1147 if (flags & PCI_MSI_FLAGS_64BIT)
1149 if (flags & PCI_MSI_FLAGS_MASKBIT)
1155 vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
1156 if (!vdev->msi_perm)
1159 ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
1166 /* Determine extended capability length for VC (2 & 9) and MFVC */
1167 static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
1169 struct pci_dev *pdev = vdev->pdev;
1171 int ret, evcc, phases, vc_arb;
1172 int len = PCI_CAP_VC_BASE_SIZEOF;
1174 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
1176 return pcibios_err_to_errno(ret);
1178 evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
1179 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
1181 return pcibios_err_to_errno(ret);
1183 if (tmp & PCI_VC_CAP2_128_PHASE)
1185 else if (tmp & PCI_VC_CAP2_64_PHASE)
1187 else if (tmp & PCI_VC_CAP2_32_PHASE)
1192 vc_arb = phases * 4;
1195 * Port arbitration tables are root & switch only;
1196 * function arbitration tables are function 0 only.
1197 * In either case, we'll never let the user write them, so
1198 * we don't care how big they are
1200 len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
1202 len = round_up(len, 16);
1208 static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
1210 struct pci_dev *pdev = vdev->pdev;
1217 case PCI_CAP_ID_MSI:
1218 return vfio_msi_cap_len(vdev, pos);
1219 case PCI_CAP_ID_PCIX:
1220 ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
1222 return pcibios_err_to_errno(ret);
1224 if (PCI_X_CMD_VERSION(word)) {
1225 if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
1226 /* Test for extended capabilities */
1227 pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
1229 vdev->extended_caps = (dword != 0);
1231 return PCI_CAP_PCIX_SIZEOF_V2;
1233 return PCI_CAP_PCIX_SIZEOF_V0;
1234 case PCI_CAP_ID_VNDR:
1235 /* length follows next field */
1236 ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
1238 return pcibios_err_to_errno(ret);
1241 case PCI_CAP_ID_EXP:
1242 if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
1243 /* Test for extended capabilities */
1244 pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
1245 vdev->extended_caps = (dword != 0);
1248 /* length based on version */
1249 if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
1250 return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
1252 return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
1254 ret = pci_read_config_byte(pdev, pos + 3, &byte);
1256 return pcibios_err_to_errno(ret);
1258 return (byte & HT_3BIT_CAP_MASK) ?
1259 HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
1260 case PCI_CAP_ID_SATA:
1261 ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
1263 return pcibios_err_to_errno(ret);
1265 byte &= PCI_SATA_REGS_MASK;
1266 if (byte == PCI_SATA_REGS_INLINE)
1267 return PCI_SATA_SIZEOF_LONG;
1269 return PCI_SATA_SIZEOF_SHORT;
1271 pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
1272 dev_name(&pdev->dev), __func__, cap, pos);
1278 static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
1280 struct pci_dev *pdev = vdev->pdev;
1286 case PCI_EXT_CAP_ID_VNDR:
1287 ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
1289 return pcibios_err_to_errno(ret);
1291 return dword >> PCI_VSEC_HDR_LEN_SHIFT;
1292 case PCI_EXT_CAP_ID_VC:
1293 case PCI_EXT_CAP_ID_VC9:
1294 case PCI_EXT_CAP_ID_MFVC:
1295 return vfio_vc_cap_len(vdev, epos);
1296 case PCI_EXT_CAP_ID_ACS:
1297 ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
1299 return pcibios_err_to_errno(ret);
1301 if (byte & PCI_ACS_EC) {
1304 ret = pci_read_config_byte(pdev,
1305 epos + PCI_ACS_EGRESS_BITS,
1308 return pcibios_err_to_errno(ret);
1310 bits = byte ? round_up(byte, 32) : 256;
1311 return 8 + (bits / 8);
1315 case PCI_EXT_CAP_ID_REBAR:
1316 ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
1318 return pcibios_err_to_errno(ret);
1320 byte &= PCI_REBAR_CTRL_NBAR_MASK;
1321 byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
1323 return 4 + (byte * 8);
1324 case PCI_EXT_CAP_ID_DPA:
1325 ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
1327 return pcibios_err_to_errno(ret);
1329 byte &= PCI_DPA_CAP_SUBSTATE_MASK;
1330 return PCI_DPA_BASE_SIZEOF + byte + 1;
1331 case PCI_EXT_CAP_ID_TPH:
1332 ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
1334 return pcibios_err_to_errno(ret);
1336 if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
1339 sts = dword & PCI_TPH_CAP_ST_MASK;
1340 sts >>= PCI_TPH_CAP_ST_SHIFT;
1341 return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
1343 return PCI_TPH_BASE_SIZEOF;
1345 pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
1346 dev_name(&pdev->dev), __func__, ecap, epos);
1352 static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
1353 int offset, int size)
1355 struct pci_dev *pdev = vdev->pdev;
1359 * We try to read physical config space in the largest chunks
1360 * we can, assuming that all of the fields support dword access.
1361 * pci_save_state() makes this same assumption and seems to do ok.
1366 if (size >= 4 && !(offset % 4)) {
1367 __le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
1370 ret = pci_read_config_dword(pdev, offset, &dword);
1373 *dwordp = cpu_to_le32(dword);
1375 } else if (size >= 2 && !(offset % 2)) {
1376 __le16 *wordp = (__le16 *)&vdev->vconfig[offset];
1379 ret = pci_read_config_word(pdev, offset, &word);
1382 *wordp = cpu_to_le16(word);
1385 u8 *byte = &vdev->vconfig[offset];
1386 ret = pci_read_config_byte(pdev, offset, byte);
1399 static int vfio_cap_init(struct vfio_pci_device *vdev)
1401 struct pci_dev *pdev = vdev->pdev;
1402 u8 *map = vdev->pci_config_map;
1405 int loops, ret, caps = 0;
1407 /* Any capabilities? */
1408 ret = pci_read_config_word(pdev, PCI_STATUS, &status);
1412 if (!(status & PCI_STATUS_CAP_LIST))
1413 return 0; /* Done */
1415 ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
1419 /* Mark the previous position in case we want to skip a capability */
1420 prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
1422 /* We can bound our loop, capabilities are dword aligned */
1423 loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
1424 while (pos && loops--) {
1428 ret = pci_read_config_byte(pdev, pos, &cap);
1432 ret = pci_read_config_byte(pdev,
1433 pos + PCI_CAP_LIST_NEXT, &next);
1437 if (cap <= PCI_CAP_ID_MAX) {
1438 len = pci_cap_length[cap];
1439 if (len == 0xFF) { /* Variable length */
1440 len = vfio_cap_len(vdev, cap, pos);
1447 pr_info("%s: %s hiding cap 0x%x\n",
1448 __func__, dev_name(&pdev->dev), cap);
1454 /* Sanity check, do we overlap other capabilities? */
1455 for (i = 0; i < len; i++) {
1456 if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
1459 pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
1460 __func__, dev_name(&pdev->dev),
1461 pos + i, map[pos + i], cap);
1464 BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
1466 memset(map + pos, cap, len);
1467 ret = vfio_fill_vconfig_bytes(vdev, pos, len);
1471 prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
1476 /* If we didn't fill any capabilities, clear the status flag */
1478 __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
1479 *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
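/*
 * Illustrative userspace sketch (not part of this file): the capability
 * chain the user walks is the virtualized one assembled above, so anything
 * hidden here simply never shows up.  'device_fd' and 'cfg' (config region
 * offset) are hypothetical; the loop bound mirrors vfio_cap_init() and a
 * little-endian host is assumed.
 */
#if 0	/* example only, userspace */
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

static uint8_t example_find_cap(int device_fd, off_t cfg, uint8_t cap_id)
{
	uint16_t status;
	uint8_t pos;
	int loops = (256 - 64) / 4;

	pread(device_fd, &status, 2, cfg + PCI_STATUS);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	pread(device_fd, &pos, 1, cfg + PCI_CAPABILITY_LIST);
	while (pos && loops--) {
		uint8_t id;

		pread(device_fd, &id, 1, cfg + pos + PCI_CAP_LIST_ID);
		if (id == cap_id)
			return pos;
		pread(device_fd, &pos, 1, cfg + pos + PCI_CAP_LIST_NEXT);
	}

	return 0;
}
#endif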
1485 static int vfio_ecap_init(struct vfio_pci_device *vdev)
1487 struct pci_dev *pdev = vdev->pdev;
1488 u8 *map = vdev->pci_config_map;
1490 __le32 *prev = NULL;
1491 int loops, ret, ecaps = 0;
1493 if (!vdev->extended_caps)
1496 epos = PCI_CFG_SPACE_SIZE;
1498 loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
1500 while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
1504 bool hidden = false;
1506 ret = pci_read_config_dword(pdev, epos, &header);
1510 ecap = PCI_EXT_CAP_ID(header);
1512 if (ecap <= PCI_EXT_CAP_ID_MAX) {
1513 len = pci_ext_cap_length[ecap];
1515 len = vfio_ext_cap_len(vdev, ecap, epos);
1522 pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
1523 __func__, dev_name(&pdev->dev), ecap, epos);
1525 /* If not the first in the chain, we can skip over it */
1527 u32 val = epos = PCI_EXT_CAP_NEXT(header);
1528 *prev &= cpu_to_le32(~(0xffcU << 20));
1529 *prev |= cpu_to_le32(val << 20);
1534 * Otherwise, fill in a placeholder; the direct
1535 * readfn will virtualize this automatically
1537 len = PCI_CAP_SIZEOF;
1541 for (i = 0; i < len; i++) {
1542 if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
1545 pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
1546 __func__, dev_name(&pdev->dev),
1547 epos + i, map[epos + i], ecap);
1551 * Even though ecap is 2 bytes, we're currently a long way
1552 * from exceeding 1 byte capabilities. If we ever make it
1553 * up to 0xFE we'll need to up this to a two-byte, byte map.
1555 BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
1557 memset(map + epos, ecap, len);
1558 ret = vfio_fill_vconfig_bytes(vdev, epos, len);
1563 * If we're just using this capability to anchor the list,
1564 * hide the real ID. Only count real ecaps. XXX PCI spec
1565 * indicates to use cap id = 0, version = 0, next = 0 if
1566 * ecaps are absent, hope users check all the way to next.
1569 *(__le32 *)&vdev->vconfig[epos] &=
1570 cpu_to_le32((0xffcU << 20));
1574 prev = (__le32 *)&vdev->vconfig[epos];
1575 epos = PCI_EXT_CAP_NEXT(header);
1579 *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
1585 * For each device we allocate a pci_config_map that indicates the
1586 * capability occupying each byte and thus the struct perm_bits we
1587 * use for read and write. We also allocate a virtualized config
1588 * space which tracks reads and writes to bits that we emulate for
1589 * the user. Initial values filled from device.
1591 * Using shared struct perm_bits between all vfio-pci devices saves
1592 * us from allocating cfg_size buffers for virt and write for every
1593 * device. We could remove vconfig and allocate individual buffers
1594 * for each area requiring emulated bits, but the array of pointers
1595 * would be comparable in size (at least for standard config space).
1597 int vfio_config_init(struct vfio_pci_device *vdev)
1599 struct pci_dev *pdev = vdev->pdev;
1604 * Config space, caps and ecaps are all dword aligned, so we could
1605 * use one byte per dword to record the type. However, there are
1606 * no requirements on the length of a capability, so the gap between
1607 * capabilities needs byte granularity.
1609 map = kmalloc(pdev->cfg_size, GFP_KERNEL);
1613 vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
1619 vdev->pci_config_map = map;
1620 vdev->vconfig = vconfig;
1622 memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
1623 memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
1624 pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
1626 ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
1630 vdev->bardirty = true;
1633 * XXX can we just pci_load_saved_state/pci_restore_state?
1634 * may need to rebuild vconfig after that
1637 /* For restore after reset */
1638 vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
1639 vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
1640 vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
1641 vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
1642 vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
1643 vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
1644 vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
1646 if (pdev->is_virtfn) {
1647 *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
1648 *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
1651 if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
1652 vconfig[PCI_INTERRUPT_PIN] = 0;
1654 ret = vfio_cap_init(vdev);
1658 ret = vfio_ecap_init(vdev);
1666 vdev->pci_config_map = NULL;
1668 vdev->vconfig = NULL;
1669 return pcibios_err_to_errno(ret);
1672 void vfio_config_free(struct vfio_pci_device *vdev)
1674 kfree(vdev->vconfig);
1675 vdev->vconfig = NULL;
1676 kfree(vdev->pci_config_map);
1677 vdev->pci_config_map = NULL;
1678 kfree(vdev->msi_perm);
1679 vdev->msi_perm = NULL;
1683 * Find the remaining number of bytes in a dword that match the given
1684 * position. Stop at either the end of the capability or the dword boundary.
1686 static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
1689 u8 cap = vdev->pci_config_map[pos];
1692 for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
1698 static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
1699 size_t count, loff_t *ppos, bool iswrite)
1701 struct pci_dev *pdev = vdev->pdev;
1702 struct perm_bits *perm;
1704 int cap_start = 0, offset;
1708 if (*ppos < 0 || *ppos >= pdev->cfg_size ||
1709 *ppos + count > pdev->cfg_size)
1713 * Chop accesses into aligned chunks containing no more than a
1714 * single capability. Caller increments to the next chunk.
1716 count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
1717 if (count >= 4 && !(*ppos % 4))
1719 else if (count >= 2 && !(*ppos % 2))
1726 cap_id = vdev->pci_config_map[*ppos];
1728 if (cap_id == PCI_CAP_ID_INVALID) {
1729 perm = &unassigned_perms;
1731 } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
1735 if (*ppos >= PCI_CFG_SPACE_SIZE) {
1736 WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
1738 perm = &ecap_perms[cap_id];
1739 cap_start = vfio_find_cap_start(vdev, *ppos);
1741 WARN_ON(cap_id > PCI_CAP_ID_MAX);
1743 perm = &cap_perms[cap_id];
1745 if (cap_id == PCI_CAP_ID_MSI)
1746 perm = vdev->msi_perm;
1748 if (cap_id > PCI_CAP_ID_BASIC)
1749 cap_start = vfio_find_cap_start(vdev, *ppos);
1753 WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
1754 WARN_ON(cap_start > *ppos);
1756 offset = *ppos - cap_start;
1762 if (copy_from_user(&val, buf, count))
1765 ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
1768 ret = perm->readfn(vdev, *ppos, count,
1769 perm, offset, &val);
1774 if (copy_to_user(buf, &val, count))
1781 ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
1782 size_t count, loff_t *ppos, bool iswrite)
1788 pos &= VFIO_PCI_OFFSET_MASK;
1791 ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);