From: Paolo Bonzini
Date: Fri, 23 Jan 2015 12:39:51 +0000 (+0100)
Subject: Merge tag 'kvm-arm-for-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm...
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=1c6007d59a20762052cc92c0a2889ff11030d23a;p=linux-beck.git

Merge tag 'kvm-arm-for-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

KVM/ARM changes for v3.20 including GICv3 emulation, dirty page logging,
added trace symbols, and adding an explicit VGIC init device control IOCTL.

Conflicts:
	arch/arm64/include/asm/kvm_arm.h
	arch/arm64/kvm/handle_exit.c
---

1c6007d59a20762052cc92c0a2889ff11030d23a
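The tag description above mentions the new explicit VGIC init device control. Purely as an illustration (this snippet is not part of the merge, and the constant names are assumed from the arm64 KVM uapi headers), userspace could request that initialization through the generic device-attribute ioctl along these lines, where vgic_fd stands for a VGIC device file descriptor obtained earlier via KVM_CREATE_DEVICE:

/*
 * Illustrative only: ask KVM to initialize the in-kernel VGIC explicitly,
 * rather than relying on the implicit init on first vcpu run.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vgic_explicit_init(int vgic_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_ARM_VGIC_GRP_CTRL;		/* VGIC control group */
	attr.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT;	/* request init now */

	/* returns 0 on success, -1 with errno set on failure */
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}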
diff --cc arch/arm64/include/asm/esr.h
index 62167090937d,72674f4c3871..92bbae381598
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@@ -18,89 -18,40 +18,90 @@@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H

-#define ESR_EL1_WRITE		(1 << 6)
-#define ESR_EL1_CM		(1 << 8)
-#define ESR_EL1_IL		(1 << 25)
+#define ESR_ELx_EC_UNKNOWN	(0x00)
+#define ESR_ELx_EC_WFx		(0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32	(0x03)
+#define ESR_ELx_EC_CP15_64	(0x04)
+#define ESR_ELx_EC_CP14_MR	(0x05)
+#define ESR_ELx_EC_CP14_LS	(0x06)
+#define ESR_ELx_EC_FP_ASIMD	(0x07)
+#define ESR_ELx_EC_CP10_ID	(0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64	(0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL		(0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32	(0x11)
+#define ESR_ELx_EC_HVC32	(0x12)
+#define ESR_ELx_EC_SMC32	(0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64	(0x15)
+#define ESR_ELx_EC_HVC64	(0x16)
+#define ESR_ELx_EC_SMC64	(0x17)
+#define ESR_ELx_EC_SYS64	(0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF	(0x1f)
+#define ESR_ELx_EC_IABT_LOW	(0x20)
+#define ESR_ELx_EC_IABT_CUR	(0x21)
+#define ESR_ELx_EC_PC_ALIGN	(0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW	(0x24)
+#define ESR_ELx_EC_DABT_CUR	(0x25)
+#define ESR_ELx_EC_SP_ALIGN	(0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32	(0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64	(0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR	(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW	(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR	(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW	(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR	(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW	(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR	(0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32	(0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32	(0x3A)
+/* Unallocted EC: 0x3B */
+#define ESR_ELx_EC_BRK64	(0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX		(0x3F)

-#define ESR_EL1_EC_SHIFT	(26)
-#define ESR_EL1_EC_UNKNOWN	(0x00)
-#define ESR_EL1_EC_WFI		(0x01)
-#define ESR_EL1_EC_CP15_32	(0x03)
-#define ESR_EL1_EC_CP15_64	(0x04)
-#define ESR_EL1_EC_CP14_MR	(0x05)
-#define ESR_EL1_EC_CP14_LS	(0x06)
-#define ESR_EL1_EC_FP_ASIMD	(0x07)
-#define ESR_EL1_EC_CP10_ID	(0x08)
-#define ESR_EL1_EC_CP14_64	(0x0C)
-#define ESR_EL1_EC_ILL_ISS	(0x0E)
-#define ESR_EL1_EC_SVC32	(0x11)
-#define ESR_EL1_EC_SVC64	(0x15)
-#define ESR_EL1_EC_SYS64	(0x18)
-#define ESR_EL1_EC_IABT_EL0	(0x20)
-#define ESR_EL1_EC_IABT_EL1	(0x21)
-#define ESR_EL1_EC_PC_ALIGN	(0x22)
-#define ESR_EL1_EC_DABT_EL0	(0x24)
-#define ESR_EL1_EC_DABT_EL1	(0x25)
-#define ESR_EL1_EC_SP_ALIGN	(0x26)
-#define ESR_EL1_EC_FP_EXC32	(0x28)
-#define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERROR	(0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0	(0x30)
-#define ESR_EL1_EC_BREAKPT_EL1	(0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1	(0x33)
-#define ESR_EL1_EC_WATCHPT_EL0	(0x34)
-#define ESR_EL1_EC_WATCHPT_EL1	(0x35)
-#define ESR_EL1_EC_BKPT32	(0x38)
-#define ESR_EL1_EC_BRK64	(0x3C)
+#define ESR_ELx_EC_SHIFT	(26)
+#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL		(UL(1) << 25)
+#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
+#define ESR_ELx_ISV		(UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT	(22)
+#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE		(UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT	(16)
+#define ESR_ELx_SRT_MASK	(UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF		(UL(1) << 15)
+#define ESR_ELx_AR		(UL(1) << 14)
+#define ESR_ELx_EA		(UL(1) << 9)
+#define ESR_ELx_CM		(UL(1) << 8)
+#define ESR_ELx_S1PTW		(UL(1) << 7)
+#define ESR_ELx_WNR		(UL(1) << 6)
+#define ESR_ELx_FSC		(0x3F)
+#define ESR_ELx_FSC_TYPE	(0x3C)
+#define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_FAULT	(0x04)
+#define ESR_ELx_FSC_PERM	(0x0C)
+#define ESR_ELx_CV		(UL(1) << 24)
+#define ESR_ELx_COND_SHIFT	(20)
+#define ESR_ELx_COND_MASK	(UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
++#define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)
+
+#ifndef __ASSEMBLY__
+#include
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY */

 #endif /* __ASM_ESR_H */
diff --cc arch/arm64/include/asm/kvm_emulate.h
index 5c56c0d2cef1,b3f1defcb081..c3baa971edab
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@@ -23,12 -23,11 +23,13 @@@
 #define __ARM64_KVM_EMULATE_H__

 #include
-#include
+
+#include
 #include
+#include
 #include
 #include
+ #include

 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@@ -128,9 -127,14 +129,14 @@@ static inline phys_addr_t kvm_vcpu_get_
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }

+ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
+ {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_HVC_IMM_MASK;
++	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+ }
+
 static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }

 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
@@@ -186,12 -190,12 +192,12 @@@ static inline u8 kvm_vcpu_trap_get_faul
 static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }

- static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-	return vcpu_sys_reg(vcpu, MPIDR_EL1);
+	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }

 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
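As an aside (illustrative only, not part of the patch), the kvm_vcpu_dabt_*() accessors above reduce to masking the same ESR_ELx_* data-abort syndrome fields introduced in esr.h; a self-contained sketch of that decode, using a made-up syndrome value:

/* Standalone sketch of the data-abort ISS decode; the example value is hypothetical. */
#include <stdio.h>

#define UL(x)			(x##UL)

#define ESR_ELx_ISV		(UL(1) << 24)			/* instruction syndrome valid */
#define ESR_ELx_SAS_SHIFT	(22)
#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)	/* access size */
#define ESR_ELx_SRT_SHIFT	(16)
#define ESR_ELx_SRT_MASK	(UL(0x1F) << ESR_ELx_SRT_SHIFT)	/* transfer register */
#define ESR_ELx_WNR		(UL(1) << 6)			/* write, not read */

static void decode_dabt(unsigned long esr)
{
	if (!(esr & ESR_ELx_ISV)) {
		printf("no valid instruction syndrome\n");
		return;
	}

	printf("%s of %d byte(s) via x%lu\n",
	       (esr & ESR_ELx_WNR) ? "write" : "read",
	       1 << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT),
	       (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
}

int main(void)
{
	decode_dabt(0x93830046);	/* hypothetical data-abort syndrome */
	return 0;
}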
diff --cc arch/arm64/kvm/handle_exit.c
index 29b184a8f3f8,6a7eb3ce7027..524fa25671fc
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@@ -63,10 -67,13 +69,13 @@@ static int handle_smc(struct kvm_vcpu *
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE)
- 	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE) {
++	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+ 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		kvm_vcpu_on_spin(vcpu);
-	else
+ 	} else {
+ 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		kvm_vcpu_block(vcpu);
+ 	}

 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
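One more illustrative aside (not from the merge itself): the reworked kvm_handle_wfx() above keys WFE vs. WFI handling off bit 0 of the trapped instruction's ISS; a trivial standalone version of that test:

/*
 * Sketch of the ESR_ELx_WFx_ISS_WFE test: bit 0 set means the guest executed
 * WFE (the vcpu may just yield/spin), clear means WFI (the vcpu blocks until
 * an interrupt is pending). Example syndrome values are hypothetical.
 */
#include <stdio.h>

#define ESR_ELx_WFx_ISS_WFE	(1UL << 0)

static const char *wfx_kind(unsigned long esr)
{
	return (esr & ESR_ELx_WFx_ISS_WFE) ? "WFE" : "WFI";
}

int main(void)
{
	printf("%s\n", wfx_kind(0x06000001));	/* hypothetical WFE trap */
	printf("%s\n", wfx_kind(0x06000000));	/* hypothetical WFI trap */
	return 0;
}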