From f9f09f7c33bfa2f9c2115105d501f566912a37e1 Mon Sep 17 00:00:00 2001
From: Matt Evans <matt@ozlabs.org>
Date: Tue, 13 Dec 2011 17:21:47 +1100
Subject: [PATCH] kvm tools: Create arch-specific kvm_cpu__emulate_{mm}io()

Different architectures will deal with MMIO exits differently.  For
example, KVM_EXIT_IO is x86-specific, and I/O cycles are often
synthesised by steering into windows in PCI bridges on other
architectures.

This patch calls arch-specific kvm_cpu__emulate_io() and
kvm_cpu__emulate_mmio() from the main runloop's IO and MMIO exit
handlers.  For x86, these directly call kvm__emulate_io() and
kvm__emulate_mmio() but other architectures will perform some address
munging before passing on the call.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
 tools/kvm/kvm-cpu.c                      | 34 ++++++++++++------------
 tools/kvm/x86/include/kvm/kvm-cpu-arch.h | 17 +++++++++++-
 2 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
index 8ec4efa3c8ac..b7ae3d3eadfd 100644
--- a/tools/kvm/kvm-cpu.c
+++ b/tools/kvm/kvm-cpu.c
@@ -52,11 +52,11 @@ static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
 		while (cpu->ring->first != cpu->ring->last) {
 			struct kvm_coalesced_mmio *m;
 			m = &cpu->ring->coalesced_mmio[cpu->ring->first];
-			kvm__emulate_mmio(cpu->kvm,
-					m->phys_addr,
-					m->data,
-					m->len,
-					1);
+			kvm_cpu__emulate_mmio(cpu->kvm,
+					m->phys_addr,
+					m->data,
+					m->len,
+					1);
 			cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 		}
 	}
@@ -111,13 +111,13 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
 		case KVM_EXIT_IO: {
 			bool ret;

-			ret = kvm__emulate_io(cpu->kvm,
-					cpu->kvm_run->io.port,
-					(u8 *)cpu->kvm_run +
-					cpu->kvm_run->io.data_offset,
-					cpu->kvm_run->io.direction,
-					cpu->kvm_run->io.size,
-					cpu->kvm_run->io.count);
+			ret = kvm_cpu__emulate_io(cpu->kvm,
+					cpu->kvm_run->io.port,
+					(u8 *)cpu->kvm_run +
+					cpu->kvm_run->io.data_offset,
+					cpu->kvm_run->io.direction,
+					cpu->kvm_run->io.size,
+					cpu->kvm_run->io.count);

 			if (!ret)
 				goto panic_kvm;
@@ -126,11 +126,11 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
 		case KVM_EXIT_MMIO: {
 			bool ret;

-			ret = kvm__emulate_mmio(cpu->kvm,
-					cpu->kvm_run->mmio.phys_addr,
-					cpu->kvm_run->mmio.data,
-					cpu->kvm_run->mmio.len,
-					cpu->kvm_run->mmio.is_write);
+			ret = kvm_cpu__emulate_mmio(cpu->kvm,
+					cpu->kvm_run->mmio.phys_addr,
+					cpu->kvm_run->mmio.data,
+					cpu->kvm_run->mmio.len,
+					cpu->kvm_run->mmio.is_write);

 			if (!ret)
 				goto panic_kvm;
diff --git a/tools/kvm/x86/include/kvm/kvm-cpu-arch.h b/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
index 822d966e9a8b..198efe68a6f0 100644
--- a/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
+++ b/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
@@ -4,7 +4,8 @@
 /* Architecture-specific kvm_cpu definitions. */

 #include <linux/kvm.h>	/* for struct kvm_regs */
-
+#include "kvm/kvm.h"	/* for kvm__emulate_{mm}io() */
+#include <stdbool.h>
 #include <pthread.h>

 struct kvm;
@@ -31,4 +32,18 @@ struct kvm_cpu {
 	struct kvm_coalesced_mmio_ring *ring;
 };

+/*
+ * As these are such simple wrappers, let's have them in the header so they'll
+ * be cheaper to call:
+ */
+static inline bool kvm_cpu__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count)
+{
+	return kvm__emulate_io(kvm, port, data, direction, size, count);
+}
+
+static inline bool kvm_cpu__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
+{
+	return kvm__emulate_mmio(kvm, phys_addr, data, len, is_write);
+}
+
 #endif /* KVM__KVM_CPU_ARCH_H */
--
2.39.5