vesa_pci_device.bar[0] = vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
pci__register(&vesa_pci_device, dev);
- kvm__register_mmio(VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
+ kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
mem = calloc(1, VESA_MEM_SIZE);
if (!mem)
u8 is_running;
u8 paused;
+
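+ /* Shared coalesced MMIO ring for this vcpu, or NULL if the capability is unavailable. */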
+ struct kvm_coalesced_mmio_ring *ring;
};
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id);
void kvm__irq_line(struct kvm *kvm, int irq, int level);
bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
-bool kvm__deregister_mmio(u64 phys_addr);
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
void kvm__pause(void);
void kvm__continue(void);
void kvm__notify_paused(void);
#include <errno.h>
#include <stdio.h>
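+/* Runtime page size, used to locate the coalesced MMIO ring inside the vcpu mmap area. */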
+#define PAGE_SIZE (sysconf(_SC_PAGE_SIZE))
+
extern __thread struct kvm_cpu *current_kvm_cpu;
static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
{
struct kvm_cpu *vcpu;
int mmap_size;
+ int coalesced_offset;
vcpu = kvm_cpu__new(kvm);
if (!vcpu)
if (vcpu->kvm_run == MAP_FAILED)
die("unable to mmap vcpu fd");
+ coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+ if (coalesced_offset)
+ vcpu->ring = (void *)vcpu->kvm_run + (coalesced_offset * PAGE_SIZE);
+
vcpu->is_running = true;
return vcpu;
}
}
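+/*
+ * Guest writes to registered coalesced zones do not cause an exit; KVM
+ * queues them in the shared ring instead. Drain any pending entries by
+ * replaying them through the regular MMIO emulation path.
+ */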
+static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
+{
+ if (cpu->ring) {
+ while (cpu->ring->first != cpu->ring->last) {
+ struct kvm_coalesced_mmio *m;
+ m = &cpu->ring->coalesced_mmio[cpu->ring->first];
+ kvm__emulate_mmio(cpu->kvm,
+ m->phys_addr,
+ m->data,
+ m->len,
+ 1);
+ cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+ }
+ }
+}
+
int kvm_cpu__start(struct kvm_cpu *cpu)
{
sigset_t sigset;
default:
goto panic_kvm;
}
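+ /* Flush MMIO writes that KVM coalesced since the last exit. */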
+ kvm_cpu__handle_coalesced_mmio(cpu);
}
exit_kvm:
#include <stdio.h>
#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
#include <linux/types.h>
#include <linux/rbtree.h>
return "read";
}
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
{
struct mmio_mapping *mmio;
+ struct kvm_coalesced_mmio_zone zone;
int ret;
mmio = malloc(sizeof(*mmio));
.kvm_mmio_callback_fn = kvm_mmio_callback_fn,
};
+ zone = (struct kvm_coalesced_mmio_zone) {
+ .addr = phys_addr,
+ .size = phys_addr_len,
+ };
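+ /*
+ * Ask KVM to buffer guest writes to this range in the coalesced MMIO
+ * ring instead of exiting on every access.
+ */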
+ ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
+ if (ret < 0) {
+ free(mmio);
+ return false;
+ }
+
br_write_lock();
ret = mmio_insert(&mmio_tree, mmio);
br_write_unlock();
return ret;
}
-bool kvm__deregister_mmio(u64 phys_addr)
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr)
{
struct mmio_mapping *mmio;
+ struct kvm_coalesced_mmio_zone zone;
br_write_lock();
mmio = mmio_search_single(&mmio_tree, phys_addr);
return false;
}
+ zone = (struct kvm_coalesced_mmio_zone) {
+ .addr = phys_addr,
+ .size = 1,
+ };
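+ /*
+ * The kernel removes every coalescing zone that covers the given
+ * address, so a one-byte zone is enough to unregister the range.
+ */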
+ ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+
rb_int_erase(&mmio_tree, &mmio->node);
br_write_unlock();