/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
	kmemleak_ignore(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
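
/*
 * Example usage of virtqueue_add_sgs() (an illustrative sketch, not part
 * of the original file): queueing a request made of one device-readable
 * part followed by one device-writable part.  The names "req", "vq" and
 * the choice of GFP_ATOMIC are hypothetical driver-side decisions.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */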

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of scatterlists (need not be terminated!)
 * @num: the number of scatterlists readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of scatterlists (need not be terminated!)
 * @num: the number of scatterlists writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
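
/*
 * Example usage of virtqueue_add_inbuf() (sketch only; "buf" and "len"
 * are hypothetical driver values): posting a single receive buffer and
 * using the buffer pointer itself as the token.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */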

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
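
/*
 * Example of the split kick (sketch; "my_lock", "sg" and "token" are
 * hypothetical driver-side names): virtqueue_kick_prepare() runs under
 * the lock that serializes virtqueue operations, while the possibly
 * expensive exit to the host in virtqueue_notify() happens after the
 * lock is dropped.
 *
 *	bool kick;
 *	int err;
 *
 *	spin_lock(&my_lock);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&my_lock);
 *	if (kick)
 *		virtqueue_notify(vq);
 */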

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the driver wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
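
/*
 * Example completion loop (sketch; "handle_completion" is a hypothetical
 * driver function): drain every used buffer, receiving back the token
 * passed to virtqueue_add_*() plus the length the device wrote.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(token, len);
 */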

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
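
/*
 * Example of the race-free re-enable pattern built from
 * virtqueue_enable_cb_prepare() and virtqueue_poll() (sketch;
 * "process_completions" is a hypothetical driver function): if a buffer
 * is used between processing and re-enabling, poll detects it and the
 * driver loops instead of missing the interrupt.
 *
 *	unsigned idx;
 *
 *	for (;;) {
 *		process_completions(vq);
 *		idx = virtqueue_enable_cb_prepare(vq);
 *		if (!virtqueue_poll(vq, idx))
 *			break;
 *		virtqueue_disable_cb(vq);
 *	}
 */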

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
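
/*
 * Example teardown loop (sketch; "free_token" is a hypothetical driver
 * function): after the device is stopped, reclaim tokens that were
 * added but never returned as used.
 *
 *	void *token;
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_token(token);
 */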

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
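
/*
 * Example transport call (sketch; "queue_mem", "my_notify" and
 * "my_callback" are hypothetical transport-side names, and the queue
 * size of 256 is an arbitrary power of 2): creating a queue over
 * pre-allocated, page-aligned memory.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_new_virtqueue(0, 256, PAGE_SIZE, vdev, true, queue_mem,
 *				 my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */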

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");