/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

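/*
 * Coalesced MMIO lets the guest batch writes to registered MMIO zones:
 * instead of exiting to userspace on every write, the kernel logs each
 * write into a one-page ring buffer that userspace maps through the
 * vcpu fd (page offset KVM_COALESCED_MMIO_PAGE_OFFSET) and drains on
 * its next exit.  The kernel is the producer (ring->last), userspace
 * the consumer (ring->first).
 */
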
/* Recover the coalesced-MMIO device from its embedded kvm_io_device. */
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	int i;

	/* Is the access inside a batchable area? */
	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully contained in
		 * (zone->addr, zone->size).
		 */
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

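/*
 * The ring is a single-producer/single-consumer circular buffer.  One
 * slot is deliberately kept unused so that first == last always means
 * "empty" and a full ring can be detected without a separate counter.
 */
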
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it?  last is the first free entry;
	 * check that we don't run into the first used entry.  There
	 * is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

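/*
 * Worked example of the arithmetic above (hypothetical values): with
 * KVM_COALESCED_MMIO_MAX == 64, first == 10 and last == 9, the ring
 * already holds 63 entries, so avail == (10 - 9 - 1) % 64 == 0 and the
 * write cannot be batched.
 */
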
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* Make the entry visible before advancing last: userspace
	 * reads the ring without taking dev->lock.
	 */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

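/*
 * When coalesced_mmio_write() returns -EOPNOTSUPP (address not in a
 * registered zone, or ring full), kvm_io_bus_write() falls through to
 * the other MMIO handlers and ultimately to a regular KVM_EXIT_MMIO
 * exit to userspace.
 */
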
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

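/*
 * No .read handler: reads cannot be coalesced, since the guest needs
 * the result synchronously.  Reads to a coalesced zone therefore still
 * take the ordinary exit-to-userspace path.
 */
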
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return 0;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

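/*
 * Userspace side (sketch, not from this file): once KVM_CHECK_EXTENSION
 * reports KVM_CAP_COALESCED_MMIO, the ring page can be reached through
 * the vcpu mmap area, e.g.:
 *
 *	ring = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE);
 */
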
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

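/*
 * Userspace usage (sketch, hypothetical addresses): zones are registered
 * on the VM fd, typically for regions such as a framebuffer whose writes
 * have no side effects that must be observed immediately:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xf0000000,
 *		.size = 0x100000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */
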
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* Unregister all zones fully contained in
		 * (zone->addr, zone->size); the last zone in the array
		 * replaces the removed one to keep the array dense.
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}

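/*
 * Consumer side (sketch, not from this file): on each exit, userspace
 * drains whatever the kernel batched since the last exit:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *e =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(e->phys_addr, e->data, e->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 *
 * handle_mmio_write() is a hypothetical stand-in for the VMM's MMIO
 * dispatch; QEMU does the equivalent in kvm_flush_coalesced_mmio_buffer().
 */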