/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supposed
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	/*
	 * On ARM-based machines, the DMA ops will do the right thing,
	 * so always use them with legacy devices.
	 */
	if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
		return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);

	return false;
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}
static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);
	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
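
/*
 * Illustrative sketch (not part of this file): a driver with one
 * driver-readable header and one device-writable status buffer might
 * queue them like this.  "req", "my_vq" and the field names are
 * hypothetical.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *
 *	err = virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(my_vq);
 */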
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
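
/*
 * Illustrative sketch (hypothetical receive path, not part of this file):
 * posting a single device-writable buffer for the device to fill in;
 * "rx_buf" and "RX_BUF_SIZE" are assumptions.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, rx_buf, RX_BUF_SIZE);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, rx_buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */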
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
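
/*
 * Illustrative sketch: the split lets a driver do the serialized part
 * under its own lock and issue the (possibly expensive) notification
 * after dropping it.  "my_lock", "sg" and "buf" are hypothetical.
 *
 *	unsigned long flags;
 *	bool kick;
 *	int err;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */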
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
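
/*
 * Illustrative sketch: a virtqueue callback usually drains all used
 * buffers in a loop ("process" is a hypothetical helper):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		process(buf, len);
 */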
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
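
/*
 * Illustrative sketch: a common interrupt-handling pattern is to disable
 * callbacks, drain the ring, and only stop once virtqueue_enable_cb()
 * confirms nothing new arrived in the meantime ("handle" is a hypothetical
 * helper):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */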
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
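
/*
 * Illustrative sketch: after resetting the device during removal, a
 * driver can reclaim buffers it queued but the device never used
 * ("free_buf" is a hypothetical helper):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */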
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
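
/*
 * Illustrative sketch (hypothetical transport code, not part of this file):
 * allocating a ring of up to 256 entries, allowing the size to be reduced
 * if a contiguous allocation fails; "my_notify" and "my_callback" are
 * assumptions, and PAGE_SIZE alignment is just one possible choice.
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true, true,
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */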
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
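
/*
 * Illustrative sketch: a transport that owns the ring can hand these
 * addresses to the device, e.g. via hypothetical 64-bit registers
 * QUEUE_DESC/QUEUE_AVAIL/QUEUE_USED at "base":
 *
 *	writeq(virtqueue_get_desc_addr(vq),  base + QUEUE_DESC);
 *	writeq(virtqueue_get_avail_addr(vq), base + QUEUE_AVAIL);
 *	writeq(virtqueue_get_used_addr(vq),  base + QUEUE_USED);
 */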
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");