/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

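/*
 * Note: vq->priv is filled in by the version-specific setup_vq() with the
 * ioremapped address of this queue's notify register, so the write above
 * lands directly on the device's doorbell for that queue.
 */
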
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);

	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

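/*
 * This handler is only installed on the shared-INTx fallback path (see
 * vp_find_vqs_intx() below); when MSI-X is in use, vp_config_changed(),
 * vp_vring_interrupt() and vring_interrupt() are attached to their vectors
 * directly and no ISR read is needed.
 */
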
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
				     GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, PCI_IRQ_MSIX |
					     (desc ? PCI_IRQ_AFFINITY : 0),
					     desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

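/*
 * Resulting vector layout: vector 0 always carries configuration-change
 * interrupts; in shared mode vector 1 serves every virtqueue, while in
 * per-vq mode vectors 1..N are handed out one per virtqueue with a
 * callback. Cleanup of partially set up state is left to the callers,
 * which invoke vp_del_vqs() on error.
 */
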
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

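/*
 * Virtqueues without a callback are deliberately kept off vp_dev->virtqueues:
 * vp_vring_interrupt() only walks that list, so queues that never raise
 * interrupts add no cost to the shared handlers.
 */
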
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	for (i = 0; i < vp_dev->msix_vectors; i++)
		if (vp_dev->msix_affinity_masks[i])
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

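/*
 * Teardown order mirrors setup: per-vq irqs are released while the queues
 * still exist, then the queues themselves, then the shared/config vectors,
 * and only once every handler is gone are the MSI-X resources and the
 * bookkeeping arrays freed.
 */
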
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}

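/*
 * Caller's-eye sketch (driver-side names below are hypothetical): a virtio
 * driver reaches this path via the virtio_find_vqs() wrapper around
 * config->find_vqs(), e.g.
 *
 *	vq_callback_t *cbs[] = { rx_done, tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *	struct virtqueue *vqs[2];
 *	int err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 *
 * On success each vqs[i] is live with its interrupt path wired up; on
 * failure everything has already been torn down via vp_del_vqs().
 */
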
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (cpu == -1)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

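/*
 * The mask set here is only a hint: it is published via
 * /proc/irq/<n>/affinity_hint for user space (e.g. irqbalance) to act on,
 * rather than programmed into the interrupt controller directly.
 */
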
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

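/*
 * Within that range the virtio spec carves out 0x1000-0x103f for
 * legacy/transitional devices and 0x1040 upward (0x1040 + device type) for
 * virtio 1.0 devices; probe() below sorts out which transport to use.
 */
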
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	kfree(vp_dev);
	return rc;
}

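/*
 * Default probe order is modern first: a transitional device that answers
 * the modern probe is driven through virtio 1.0, and only an -ENODEV from
 * the modern path (a pure legacy device) falls back to the legacy
 * transport. force_legacy inverts this preference.
 */
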
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");