 		while (cpu->ring->first != cpu->ring->last) {
 			struct kvm_coalesced_mmio *m;
 			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
-			kvm__emulate_mmio(cpu->kvm,
-					  m->phys_addr,
-					  m->data,
-					  m->len,
-					  1);
+			kvm_cpu__emulate_mmio(cpu->kvm,
+					      m->phys_addr,
+					      m->data,
+					      m->len,
+					      1);
 			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 		}
 	}
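Side note (not part of the patch): the ring drained above is the coalesced-MMIO buffer that KVM shares with userspace so that writes to registered regions are batched instead of exiting once per access. Entries are consumed from first to last, wrapping at KVM_COALESCED_MMIO_MAX, and each one is replayed as a write, hence the hard-coded 1 for is_write. For reference, a region is opted into coalescing roughly like this; vm_fd is an assumed, already-open VM file descriptor and the address range is made up:

	/* sketch only; needs <stdio.h>, <sys/ioctl.h> and <linux/kvm.h> */
	struct kvm_coalesced_mmio_zone zone = {
		.addr	= 0xd0000000,	/* guest-physical base */
		.size	= 0x1000,	/* length in bytes */
	};
	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
		perror("KVM_REGISTER_COALESCED_MMIO");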
 		case KVM_EXIT_IO: {
 			bool ret;
 
-			ret = kvm__emulate_io(cpu->kvm,
-					      cpu->kvm_run->io.port,
-					      (u8 *)cpu->kvm_run +
-					      cpu->kvm_run->io.data_offset,
-					      cpu->kvm_run->io.direction,
-					      cpu->kvm_run->io.size,
-					      cpu->kvm_run->io.count);
+			ret = kvm_cpu__emulate_io(cpu->kvm,
+						  cpu->kvm_run->io.port,
+						  (u8 *)cpu->kvm_run +
+						  cpu->kvm_run->io.data_offset,
+						  cpu->kvm_run->io.direction,
+						  cpu->kvm_run->io.size,
+						  cpu->kvm_run->io.count);
 			if (!ret)
 				goto panic_kvm;
 			break;
 		}
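Side note: for KVM_EXIT_IO the bytes being transferred live inside the kvm_run mapping itself, io.data_offset bytes past its start, which is exactly what the (u8 *) arithmetic above computes. io.count exceeds one for x86 string instructions (e.g. rep outsb), in which case the buffer holds count consecutive items of io.size bytes each. A rough sketch of walking such an exit, where run is a struct kvm_run * and handle_one_port_access() is an invented helper, not kvmtool API:

	u8 *p = (u8 *)run + run->io.data_offset;
	u32 i;

	for (i = 0; i < run->io.count; i++, p += run->io.size)
		handle_one_port_access(run->io.port, p, run->io.size,
				       run->io.direction == KVM_EXIT_IO_OUT);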
 		case KVM_EXIT_MMIO: {
 			bool ret;
 
-			ret = kvm__emulate_mmio(cpu->kvm,
-						cpu->kvm_run->mmio.phys_addr,
-						cpu->kvm_run->mmio.data,
-						cpu->kvm_run->mmio.len,
-						cpu->kvm_run->mmio.is_write);
+			ret = kvm_cpu__emulate_mmio(cpu->kvm,
+						    cpu->kvm_run->mmio.phys_addr,
+						    cpu->kvm_run->mmio.data,
+						    cpu->kvm_run->mmio.len,
+						    cpu->kvm_run->mmio.is_write);
 			if (!ret)
 				goto panic_kvm;
 			break;
 		}
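Side note: unlike the PIO case, the MMIO payload is not at an offset; kvm_run embeds it directly, which is why mmio.len can never exceed 8. From <linux/kvm.h> (abridged from memory):

	/* KVM_EXIT_MMIO */
	struct {
		__u64 phys_addr;
		__u8  data[8];	/* inline buffer, hence len <= 8 */
		__u32 len;
		__u8  is_write;
	} mmio;

For reads (is_write == 0) the emulator fills data in and KVM picks it up on the next KVM_RUN. The remaining hunks move to the architecture header, presumably x86's kvm-cpu-arch.h judging by the include guard, which supplies the kvm_cpu__emulate_*() names used above.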
 /* Architecture-specific kvm_cpu definitions. */
 #include <linux/kvm.h>	/* for struct kvm_regs */
-
+#include "kvm/kvm.h"	/* for kvm__emulate_{mm}io() */
+#include <stdbool.h>
 #include <pthread.h>
 
 struct kvm;
 
 struct kvm_cpu {
 	/* ... other fields elided in this excerpt ... */
 	struct kvm_coalesced_mmio_ring	*ring;
 };
+/*
+ * As these are such simple wrappers, let's have them in the header so they'll
+ * be cheaper to call:
+ */
+static inline bool kvm_cpu__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count)
+{
+	return kvm__emulate_io(kvm, port, data, direction, size, count);
+}
+
+static inline bool kvm_cpu__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
+{
+	return kvm__emulate_mmio(kvm, phys_addr, data, len, is_write);
+}
+
 #endif /* KVM__KVM_CPU_ARCH_H */
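Side note: on x86 the wrappers are pure forwarding, so the rename above looks like churn, but routing every exit through an arch-owned header gives other ports a seam to interpose their own handling before (or instead of) the generic emulators, without touching the core vcpu loop. A hypothetical non-x86 variant, with arch_pre_mmio_hook() invented purely for illustration:

	static inline bool kvm_cpu__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
	{
		/* give arch code first refusal on the access */
		if (arch_pre_mmio_hook(kvm, phys_addr, data, len, is_write))
			return true;

		return kvm__emulate_mmio(kvm, phys_addr, data, len, is_write);
	}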