From b7b038d41a20ab93d98ecc897ecac6b8c2e2cbd6 Mon Sep 17 00:00:00 2001
From: Asias He
Date: Mon, 28 Nov 2011 13:34:11 +0800
Subject: [PATCH] kvm tools: Improve virtio blk request processing

There are at most VIRTIO_BLK_QUEUE_SIZE outstanding requests at any
time, held in bdev->reqs[VIRTIO_BLK_QUEUE_SIZE]. We can simply use the
descriptor chain head of each request to fetch the right
'struct blk_dev_req' in bdev->reqs[]. This eliminates the list and lock
operations introduced by virtio_blk_req_{pop, push}.

Signed-off-by: Asias He
Signed-off-by: Pekka Enberg
---
 tools/kvm/include/kvm/virtio.h |  3 ++-
 tools/kvm/virtio/blk.c         | 48 +++++++++-------------------
 tools/kvm/virtio/core.c        | 24 ++++++++++++-----
 3 files changed, 31 insertions(+), 44 deletions(-)
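Reviewer note, not part of the patch: the change works because the descriptor
chain head returned by virt_queue__pop() uniquely identifies an outstanding
request, so a preallocated per-device array indexed by that head can replace
the mutex-protected free list. Below is a minimal standalone sketch of the
idea; QUEUE_SIZE, struct req and queue_pop() are invented stand-ins for this
illustration, not the kvm tool's real types.

/*
 * Standalone illustration: index a preallocated request array by the
 * descriptor chain head instead of keeping a mutex-protected free list.
 * QUEUE_SIZE, struct req and queue_pop() are made up for this sketch.
 */
#include <stdio.h>

#define QUEUE_SIZE 8			/* stands in for VIRTIO_BLK_QUEUE_SIZE */

struct req {
	unsigned short head;		/* chain head that owns this slot */
	int in_flight;
};

/*
 * One slot per possible chain head: the guest cannot hand out the same
 * head again until the request using it has been completed, so
 * reqs[head] is always free when queue_pop() returns that head.
 */
static struct req reqs[QUEUE_SIZE];

/* Pretend virtqueue: pop the next available descriptor chain head. */
static unsigned short queue_pop(unsigned short *next_head)
{
	unsigned short head = *next_head;

	*next_head = (*next_head + 1) % QUEUE_SIZE;
	return head;
}

int main(void)
{
	unsigned short next_head = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned short head = queue_pop(&next_head);
		struct req *req = &reqs[head];	/* O(1): no list walk, no lock */

		req->head = head;
		req->in_flight = 1;
		printf("request %d -> slot %u\n", i, head);
	}
	return 0;
}

Compared with the old virtio_blk_req_{pop, push} free list, the lookup is O(1)
and takes no lock.
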
diff --git a/tools/kvm/include/kvm/virtio.h b/tools/kvm/include/kvm/virtio.h
index d117bfc66dba..a7aa0200503e 100644
--- a/tools/kvm/include/kvm/virtio.h
+++ b/tools/kvm/include/kvm/virtio.h
@@ -58,7 +58,8 @@ static inline void *guest_pfn_to_host(struct kvm *kvm, u32 pfn)
 
 struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);
 bool virtio_queue__should_signal(struct virt_queue *vq);
-u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm);
+u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm);
+u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm);
 u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
 			      struct iovec in_iov[], struct iovec out_iov[],
 			      u16 *in, u16 *out);
diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
index 9495f283bae4..8c6f90b88f5c 100644
--- a/tools/kvm/virtio/blk.c
+++ b/tools/kvm/virtio/blk.c
@@ -30,7 +30,6 @@
 #define NUM_VIRT_QUEUES		1
 
 struct blk_dev_req {
-	struct list_head		list;
 	struct virt_queue		*vq;
 	struct blk_dev			*bdev;
 	struct iovec			iov[VIRTIO_BLK_QUEUE_SIZE];
@@ -57,27 +56,6 @@ struct blk_dev {
 static LIST_HEAD(bdevs);
 static int compat_id;
 
-static struct blk_dev_req *virtio_blk_req_pop(struct blk_dev *bdev)
-{
-	struct blk_dev_req *req = NULL;
-
-	mutex_lock(&bdev->req_mutex);
-	if (!list_empty(&bdev->req_list)) {
-		req = list_first_entry(&bdev->req_list, struct blk_dev_req, list);
-		list_del_init(&req->list);
-	}
-	mutex_unlock(&bdev->req_mutex);
-
-	return req;
-}
-
-static void virtio_blk_req_push(struct blk_dev *bdev, struct blk_dev_req *req)
-{
-	mutex_lock(&bdev->req_mutex);
-	list_add(&req->list, &bdev->req_list);
-	mutex_unlock(&bdev->req_mutex);
-}
-
 void virtio_blk_complete(void *param, long len)
 {
 	struct blk_dev_req *req = param;
@@ -95,8 +73,6 @@ void virtio_blk_complete(void *param, long len)
 
 	if (virtio_queue__should_signal(&bdev->vqs[queueid]))
 		bdev->vtrans.trans_ops->signal_vq(req->kvm, &bdev->vtrans, queueid);
-
-	virtio_blk_req_push(req->bdev, req);
 }
 
 static void virtio_blk_do_io_request(struct kvm *kvm, struct blk_dev_req *req)
@@ -141,15 +117,14 @@ static void virtio_blk_do_io_request(struct kvm *kvm, struct blk_dev_req *req)
 
 static void virtio_blk_do_io(struct kvm *kvm, struct virt_queue *vq, struct blk_dev *bdev)
 {
-	while (virt_queue__available(vq)) {
-		struct blk_dev_req *req = virtio_blk_req_pop(bdev);
+	struct blk_dev_req *req;
+	u16 head;
 
-		*req = (struct blk_dev_req) {
-			.vq	= vq,
-			.bdev	= bdev,
-			.kvm	= kvm,
-		};
-		req->head = virt_queue__get_iov(vq, req->iov, &req->out, &req->in, kvm);
+	while (virt_queue__available(vq)) {
+		head		= virt_queue__pop(vq);
+		req		= &bdev->reqs[head];
+		req->head	= virt_queue__get_head_iov(vq, req->iov, &req->out, &req->in, head, kvm);
+		req->vq		= vq;
 
 		virtio_blk_do_io_request(kvm, req);
 	}
@@ -235,7 +210,7 @@ static struct virtio_ops blk_dev_virtio_ops = (struct virtio_ops) {
 void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
 {
 	struct blk_dev *bdev;
-	size_t i;
+	unsigned int i;
 
 	if (!disk)
 		return;
@@ -261,9 +236,10 @@ void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
 
 	list_add_tail(&bdev->list, &bdevs);
 
-	INIT_LIST_HEAD(&bdev->req_list);
-	for (i = 0; i < ARRAY_SIZE(bdev->reqs); i++)
-		list_add(&bdev->reqs[i].list, &bdev->req_list);
+	for (i = 0; i < ARRAY_SIZE(bdev->reqs); i++) {
+		bdev->reqs[i].bdev = bdev;
+		bdev->reqs[i].kvm = kvm;
+	}
 
 	disk_image__set_callback(bdev->disk, virtio_blk_complete);
 
diff --git a/tools/kvm/virtio/core.c b/tools/kvm/virtio/core.c
index 8032d7a8354e..a6f180ed0a26 100644
--- a/tools/kvm/virtio/core.c
+++ b/tools/kvm/virtio/core.c
@@ -33,18 +33,18 @@ struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32
 	return used_elem;
 }
 
-u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
+u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, u16 head, struct kvm *kvm)
 {
 	struct vring_desc *desc;
-	u16 head, idx;
+	u16 idx;
 
-	idx = head = virt_queue__pop(queue);
+	idx = head;
 	*out = *in = 0;
 
 	do {
-		desc = virt_queue__get_desc(queue, idx);
-		iov[*out + *in].iov_base = guest_flat_to_host(kvm, desc->addr);
-		iov[*out + *in].iov_len = desc->len;
+		desc				= virt_queue__get_desc(vq, idx);
+		iov[*out + *in].iov_base	= guest_flat_to_host(kvm, desc->addr);
+		iov[*out + *in].iov_len		= desc->len;
 		if (desc->flags & VRING_DESC_F_WRITE)
 			(*in)++;
 		else
@@ -58,13 +58,22 @@ u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out,
 	return head;
 }
 
+u16 virt_queue__get_iov(struct virt_queue *vq, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm)
+{
+	u16 head;
+
+	head = virt_queue__pop(vq);
+
+	return virt_queue__get_head_iov(vq, iov, out, in, head, kvm);
+}
+
 /* in and out are relative to guest */
 u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
 			      struct iovec in_iov[], struct iovec out_iov[],
 			      u16 *in, u16 *out)
 {
-	u16 head, idx;
 	struct vring_desc *desc;
+	u16 head, idx;
 
 	idx = head = virt_queue__pop(queue);
 	*out = *in = 0;
@@ -86,6 +95,7 @@ u16 virt_queue__get_inout_iov(struct kvm *kvm, struct virt_queue *queue,
 		else
 			break;
 	} while (1);
+
 	return head;
 }
 
-- 
2.39.5