/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */
#include "iodev.h"
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
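/*
 * Writes to registered MMIO zones are not forwarded to userspace right
 * away; they are logged into a ring buffer that shares a page with
 * userspace, which drains the ring on its next exit.
 */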
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
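/*
 * A write can be coalesced only if the ring has room for it and the
 * (addr, len) access lies entirely inside one of the registered zones.
 */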
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        struct kvm_coalesced_mmio_zone *zone;
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;
        int i;
        /* Are we able to batch it? */

        /* last is the first free entry;
         * check that we do not run into the first used entry.
         * There is always one unused entry in the buffer.
         */
        ring = dev->kvm->coalesced_mmio_ring;
        avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
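        /*
         * Note: KVM_MAX_VCPUS free entries are required rather than just
         * one, presumably so that every vCPU that passed this check before
         * taking dev->lock can still append its entry without overflowing
         * the ring.
         */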
        if (avail < KVM_MAX_VCPUS) {
                /* full */
                return 0;
        }

        /* is it in a batchable area? */
        for (i = 0; i < dev->nb_zones; i++) {
                zone = &dev->zone[i];

                /* (addr, len) is fully included in
                 * (zone->addr, zone->size)
                 */
                if (zone->addr <= addr &&
                    addr + len <= zone->addr + zone->size)
                        return 1;
        }
        return 0;
}
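/*
 * Returns 0 when the write has been coalesced (no exit to userspace is
 * needed), or -EOPNOTSUPP to let the caller fall back to the normal
 * MMIO path.
 */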
static int coalesced_mmio_write(struct kvm_io_device *this,
                                gpa_t addr, int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->lock);
        /* copy data in first free entry of the ring */
        ring->coalesced_mmio[ring->last].phys_addr = addr;
        ring->coalesced_mmio[ring->last].len = len;
        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
        /* make the entry visible before publishing the new 'last' index */
        smp_wmb();
        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->lock);
        return 0;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        kfree(dev);
}
static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};
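/*
 * Set up coalesced MMIO for a VM: allocate the page backing the shared
 * ring and register the device on the MMIO bus.
 */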
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct kvm_coalesced_mmio_dev *dev;
        struct page *page;
        int ret;

        ret = -ENOMEM;
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                goto out_err;
        kvm->coalesced_mmio_ring = page_address(page);

        ret = -ENOMEM;
        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                goto out_free_page;
        spin_lock_init(&dev->lock);
        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        kvm->coalesced_mmio_dev = dev;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0)
                goto out_free_dev;

        return ret;

out_free_dev:
        kvm->coalesced_mmio_dev = NULL;
        kfree(dev);
out_free_page:
        kvm->coalesced_mmio_ring = NULL;
        __free_page(page);
out_err:
        return ret;
}
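/*
 * Only the ring page is released here; the device structure itself is
 * freed by coalesced_mmio_destructor when the MMIO bus is destroyed.
 */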
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}
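/*
 * KVM_REGISTER_COALESCED_MMIO: add a guest physical address range whose
 * writes should be coalesced.
 */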
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

        if (dev == NULL)
                return -ENXIO;

        mutex_lock(&kvm->slots_lock);
        if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
                mutex_unlock(&kvm->slots_lock);
                return -ENOBUFS;
        }

        dev->zone[dev->nb_zones] = *zone;
        dev->nb_zones++;

        mutex_unlock(&kvm->slots_lock);
        return 0;
}
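/*
 * KVM_UNREGISTER_COALESCED_MMIO: drop every registered zone that is fully
 * contained in the given (zone->addr, zone->size) range.
 */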
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
        struct kvm_coalesced_mmio_zone *z;
        int i;

        if (dev == NULL)
                return -ENXIO;

        mutex_lock(&kvm->slots_lock);

        i = dev->nb_zones;
        while (i) {
                z = &dev->zone[i - 1];
                /* unregister all zones fully included in (zone->addr, zone->size) */
                if (zone->addr <= z->addr &&
                    z->addr + z->size <= zone->addr + zone->size) {
                        dev->nb_zones--;
                        /* keep the array dense: move the last zone into the hole */
                        *z = dev->zone[dev->nb_zones];
                }
                i--;
        }

        mutex_unlock(&kvm->slots_lock);
        return 0;
}