/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;
	/* Other side has made a mess, don't try any more. */
	bool broken;
	/* Host supports indirect buffers */
	bool indirect;
	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;
	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
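
/*
 * Example usage (a sketch, not part of this file): a driver queues one
 * outgoing buffer and kicks the host.  "vq" is the driver's virtqueue and
 * "req" a hypothetical, physically contiguous request structure; error
 * handling beyond the full-ring case is elided.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, req, sizeof(*req));
 *	if (virtqueue_add_buf_gfp(vq, &sg, 1, 0, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;
 *	virtqueue_kick(vq);
 */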
void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);

	old = vq->vring.avail->idx;
	new = vq->vring.avail->idx = old + vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb(vq);

	if (vq->event ?
	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
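
/*
 * For reference, vring_need_event() lives in <linux/virtio_ring.h> and is
 * roughly the following: notify only if the event index the other side
 * published falls within the window of entries we just exposed (all
 * arithmetic is modulo 2^16):
 *
 *	static inline int vring_need_event(__u16 event_idx,
 *					   __u16 new_idx, __u16 old)
 *	{
 *		return (__u16)(new_idx - event_idx - 1) <
 *		       (__u16)(new_idx - old);
 *	}
 */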
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
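
/*
 * Example usage (a sketch): draining all completed buffers, typically from
 * the virtqueue callback.  "process" is a hypothetical driver function;
 * declarations of "buf" and "len" are elided.
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		process(buf, len);
 */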
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
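
/*
 * Example usage (a sketch): the usual race-free pattern.  If more used
 * buffers appeared between the last virtqueue_get_buf() and re-enabling
 * callbacks, virtqueue_enable_cb() returns false and we poll again rather
 * than risk missing a completion:
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */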
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
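
/*
 * Worked example of the 3/4 threshold (informal): with 16 buffers still
 * outstanding, bufs = 16 * 3 / 4 = 12, so the used event index is placed
 * 12 entries ahead of last_used_idx and the host interrupts us only once
 * roughly three quarters of the pending buffers have been consumed.
 */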
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
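
/*
 * Example usage (a sketch under assumed transport conventions): creating a
 * page-aligned 256-entry ring with weak barriers.  "my_notify" and
 * "my_callback" are hypothetical transport functions.
 *
 *	void *pages = alloc_pages_exact(vring_size(256, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(256, PAGE_SIZE, vdev,
 *						   true, pages, my_notify,
 *						   my_callback, "my-vq");
 */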
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
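
/*
 * Transports are expected to call this from their finalize_features hook so
 * that ring feature bits they don't handle themselves are vetted here.  A
 * sketch, with "my_finalize_features" as a hypothetical transport callback:
 *
 *	static void my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *	}
 */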
/* return the size of the vring within the virtqueue */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
MODULE_LICENSE("GPL");